Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions


@ -0,0 +1,39 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "crc32c/crc32c.h"
#include <cstddef>
#include <cstdint>
#include "./crc32c_arm64.h"
#include "./crc32c_arm64_check.h"
#include "./crc32c_internal.h"
#include "./crc32c_sse42.h"
#include "./crc32c_sse42_check.h"
namespace crc32c {
uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
static bool can_use_sse42 = CanUseSse42();
if (can_use_sse42) return ExtendSse42(crc, data, count);
#elif HAVE_ARM64_CRC32C
static bool can_use_arm64_crc32 = CanUseArm64Crc32();
if (can_use_arm64_crc32) return ExtendArm64(crc, data, count);
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
return ExtendPortable(crc, data, count);
}
extern "C" uint32_t crc32c_extend(uint32_t crc, const uint8_t* data,
size_t count) {
return crc32c::Extend(crc, data, count);
}
extern "C" uint32_t crc32c_value(const uint8_t* data, size_t count) {
return crc32c::Crc32c(data, count);
}
} // namespace crc32c
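Extend() dispatches at runtime to the fastest implementation the CPU supports, and the extern "C" wrappers expose the same entry points to C callers. For orientation, a minimal caller of the C++ API declared in crc32c/crc32c.h could look like the sketch below (not part of the commit; the unit tests later in this commit exercise the fact that Crc32c(buf, n) equals Extend(0, buf, n)):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

#include "crc32c/crc32c.h"

int main() {
  const uint8_t* hello = reinterpret_cast<const uint8_t*>("hello ");
  const uint8_t* world = reinterpret_cast<const uint8_t*>("world");
  // Extending a running value lets the checksum be computed incrementally over
  // consecutive chunks; the result matches checksumming "hello world" at once.
  uint32_t crc = crc32c::Crc32c(hello, 6);
  crc = crc32c::Extend(crc, world, 5);
  std::printf("crc32c(\"hello world\") = 0x%08" PRIx32 "\n", crc);
  return 0;
}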


@ -0,0 +1,123 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "./crc32c_arm64.h"
// In a separate source file to allow this accelerated CRC32C function to be
// compiled with the appropriate compiler flags to enable ARM NEON CRC32C
// instructions.
// This implementation is based on https://github.com/google/leveldb/pull/490.
#include <cstddef>
#include <cstdint>
#include "./crc32c_internal.h"
#include "crc32c/crc32c_config.h"
#if HAVE_ARM64_CRC32C
#include <arm_acle.h>
#include <arm_neon.h>
#define KBYTES 1032
#define SEGMENTBYTES 256
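// Each iteration of the main loop below consumes KBYTES = 4 * SEGMENTBYTES + 8
// bytes: four 256-byte segments whose CRCs are computed in interleaved
// fashion, plus the 8 bytes folded in during the merge step.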
// Compute 8 bytes for each segment in parallel.
#define CRC32C32BYTES(P, IND) \
do { \
crc1 = __crc32cd( \
crc1, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 1 + (IND))); \
crc2 = __crc32cd( \
crc2, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 2 + (IND))); \
crc3 = __crc32cd( \
crc3, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 3 + (IND))); \
crc0 = __crc32cd( \
crc0, *((const uint64_t *)(P) + (SEGMENTBYTES / 8) * 0 + (IND))); \
} while (0);
// Compute 8*8 bytes for each segment in parallel.
#define CRC32C256BYTES(P, IND) \
do { \
CRC32C32BYTES((P), (IND)*8 + 0) \
CRC32C32BYTES((P), (IND)*8 + 1) \
CRC32C32BYTES((P), (IND)*8 + 2) \
CRC32C32BYTES((P), (IND)*8 + 3) \
CRC32C32BYTES((P), (IND)*8 + 4) \
CRC32C32BYTES((P), (IND)*8 + 5) \
CRC32C32BYTES((P), (IND)*8 + 6) \
CRC32C32BYTES((P), (IND)*8 + 7) \
} while (0);
// Compute 4*8*8 bytes for each segment in parallel.
#define CRC32C1024BYTES(P) \
do { \
CRC32C256BYTES((P), 0) \
CRC32C256BYTES((P), 1) \
CRC32C256BYTES((P), 2) \
CRC32C256BYTES((P), 3) \
(P) += 4 * SEGMENTBYTES; \
} while (0)
namespace crc32c {
uint32_t ExtendArm64(uint32_t crc, const uint8_t *data, size_t size) {
int64_t length = size;
uint32_t crc0, crc1, crc2, crc3;
uint64_t t0, t1, t2;
// k0=CRC(x^(3*SEGMENTBYTES*8)), k1=CRC(x^(2*SEGMENTBYTES*8)),
// k2=CRC(x^(SEGMENTBYTES*8))
const poly64_t k0 = 0x8d96551c, k1 = 0xbd6f81f8, k2 = 0xdcb17aa4;
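// The merge relies on CRC linearity over GF(2): carry-less multiplying a
// segment's partial CRC by k_i (vmull_p64 below) shifts it into the position
// it would occupy in the concatenated block, and the shifted values can then
// be folded together with xors and one more __crc32cd reduction.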
crc = crc ^ kCRC32Xor;
while (length >= KBYTES) {
crc0 = crc;
crc1 = 0;
crc2 = 0;
crc3 = 0;
// Process 1024 bytes in parallel.
CRC32C1024BYTES(data);
// Merge the 4 partial CRC32C values.
t2 = (uint64_t)vmull_p64(crc2, k2);
t1 = (uint64_t)vmull_p64(crc1, k1);
t0 = (uint64_t)vmull_p64(crc0, k0);
crc = __crc32cd(crc3, *(uint64_t *)data);
data += sizeof(uint64_t);
crc ^= __crc32cd(0, t2);
crc ^= __crc32cd(0, t1);
crc ^= __crc32cd(0, t0);
length -= KBYTES;
}
while (length >= 8) {
crc = __crc32cd(crc, *(uint64_t *)data);
data += 8;
length -= 8;
}
if (length & 4) {
crc = __crc32cw(crc, *(uint32_t *)data);
data += 4;
}
if (length & 2) {
crc = __crc32ch(crc, *(uint16_t *)data);
data += 2;
}
if (length & 1) {
crc = __crc32cb(crc, *data);
}
return crc ^ kCRC32Xor;
}
} // namespace crc32c
#endif // HAVE_ARM64_CRC32C


@ -0,0 +1,25 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// ARM-specific code
#ifndef CRC32C_CRC32C_ARM_H_
#define CRC32C_CRC32C_ARM_H_
#include <cstddef>
#include <cstdint>
#include "crc32c/crc32c_config.h"
#if HAVE_ARM64_CRC32C
namespace crc32c {
uint32_t ExtendArm64(uint32_t crc, const uint8_t* data, size_t count);
} // namespace crc32c
#endif // HAVE_ARM64_CRC32C
#endif // CRC32C_CRC32C_ARM_H_


@ -0,0 +1,66 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// ARM-specific code checking for the availability of CRC32C instructions.
#ifndef CRC32C_CRC32C_ARM_CHECK_H_
#define CRC32C_CRC32C_ARM_CHECK_H_
#include <cstddef>
#include <cstdint>
#include "crc32c/crc32c_config.h"
#if HAVE_ARM64_CRC32C
#ifdef __linux__
#if HAVE_STRONG_GETAUXVAL
#include <sys/auxv.h>
#elif HAVE_WEAK_GETAUXVAL
// getauxval() is not available on Android until API level 20. Link it as a weak
// symbol.
extern "C" unsigned long getauxval(unsigned long type) __attribute__((weak));
#define AT_HWCAP 16
#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
#endif // defined (__linux__)
#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif // defined (__APPLE__)
namespace crc32c {
inline bool CanUseArm64Crc32() {
#if defined (__linux__) && (HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL)
// From 'arch/arm64/include/uapi/asm/hwcap.h' in Linux kernel source code.
constexpr unsigned long kHWCAP_PMULL = 1 << 4;
constexpr unsigned long kHWCAP_CRC32 = 1 << 7;
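// Both capabilities are required: ExtendArm64() uses the PMULL (vmull_p64)
// instructions for its merge step in addition to the CRC32 instructions.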
unsigned long hwcap =
#if HAVE_STRONG_GETAUXVAL
// Some compilers warn on (&getauxval != nullptr) in the block below.
getauxval(AT_HWCAP);
#elif HAVE_WEAK_GETAUXVAL
(&getauxval != nullptr) ? getauxval(AT_HWCAP) : 0;
#else
#error This is supposed to be nested inside a check for HAVE_*_GETAUXVAL.
#endif // HAVE_STRONG_GETAUXVAL
return (hwcap & (kHWCAP_PMULL | kHWCAP_CRC32)) ==
(kHWCAP_PMULL | kHWCAP_CRC32);
#elif defined(__APPLE__)
int val = 0;
size_t len = sizeof(val);
return sysctlbyname("hw.optional.armv8_crc32", &val, &len, nullptr, 0) == 0
&& val != 0;
#else
return false;
#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
}
} // namespace crc32c
#endif // HAVE_ARM64_CRC32C
#endif // CRC32C_CRC32C_ARM_CHECK_H_


@ -0,0 +1,24 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "./crc32c_arm64.h"
#include "./crc32c_extend_unittests.h"
namespace crc32c {
#if HAVE_ARM64_CRC32C
struct Arm64TestTraits {
static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
return ExtendArm64(crc, data, count);
}
};
INSTANTIATE_TYPED_TEST_SUITE_P(Arm64, ExtendTest, Arm64TestTraits);
#endif // HAVE_ARM64_CRC32C
} // namespace crc32c


@ -0,0 +1,104 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <cstddef>
#include <cstdint>
#include "crc32c/crc32c_config.h"
#include "benchmark/benchmark.h"
#if CRC32C_TESTS_BUILT_WITH_GLOG
#include "glog/logging.h"
#endif // CRC32C_TESTS_BUILT_WITH_GLOG
#include "./crc32c_arm64.h"
#include "./crc32c_arm64_check.h"
#include "./crc32c_internal.h"
#include "./crc32c_sse42.h"
#include "./crc32c_sse42_check.h"
#include "crc32c/crc32c.h"
class CRC32CBenchmark : public benchmark::Fixture {
public:
void SetUp(const benchmark::State& state) override {
block_size_ = static_cast<size_t>(state.range(0));
block_data_ = std::string(block_size_, 'x');
block_buffer_ = reinterpret_cast<const uint8_t*>(block_data_.data());
}
protected:
std::string block_data_;
const uint8_t* block_buffer_;
size_t block_size_;
};
BENCHMARK_DEFINE_F(CRC32CBenchmark, Public)(benchmark::State& state) {
uint32_t crc = 0;
for (auto _ : state)
crc = crc32c::Extend(crc, block_buffer_, block_size_);
state.SetBytesProcessed(state.iterations() * block_size_);
}
BENCHMARK_REGISTER_F(CRC32CBenchmark, Public)
->RangeMultiplier(16)
->Range(256, 16777216); // Block size.
BENCHMARK_DEFINE_F(CRC32CBenchmark, Portable)(benchmark::State& state) {
uint32_t crc = 0;
for (auto _ : state)
crc = crc32c::ExtendPortable(crc, block_buffer_, block_size_);
state.SetBytesProcessed(state.iterations() * block_size_);
}
BENCHMARK_REGISTER_F(CRC32CBenchmark, Portable)
->RangeMultiplier(16)
->Range(256, 16777216); // Block size.
#if HAVE_ARM64_CRC32C
BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmCRC32C)(benchmark::State& state) {
if (!crc32c::CanUseArm64Crc32()) {
state.SkipWithError("ARM CRC32C instructions not available or not enabled");
return;
}
uint32_t crc = 0;
for (auto _ : state)
crc = crc32c::ExtendArm64(crc, block_buffer_, block_size_);
state.SetBytesProcessed(state.iterations() * block_size_);
}
BENCHMARK_REGISTER_F(CRC32CBenchmark, ArmCRC32C)
->RangeMultiplier(16)
->Range(256, 16777216); // Block size.
#endif // HAVE_ARM64_CRC32C
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
BENCHMARK_DEFINE_F(CRC32CBenchmark, Sse42)(benchmark::State& state) {
if (!crc32c::CanUseSse42()) {
state.SkipWithError("SSE4.2 instructions not available or not enabled");
return;
}
uint32_t crc = 0;
for (auto _ : state)
crc = crc32c::ExtendSse42(crc, block_buffer_, block_size_);
state.SetBytesProcessed(state.iterations() * block_size_);
}
BENCHMARK_REGISTER_F(CRC32CBenchmark, Sse42)
->RangeMultiplier(16)
->Range(256, 16777216); // Block size.
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
int main(int argc, char** argv) {
#if CRC32C_TESTS_BUILT_WITH_GLOG
google::InitGoogleLogging(argv[0]);
google::InstallFailureSignalHandler();
#endif // CRC32C_TESTS_BUILT_WITH_GLOG
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}


@ -0,0 +1,66 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "crc32c/crc32c.h"
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main() {
/* From rfc3720 section B.4. */
uint8_t buf[32];
memset(buf, 0, sizeof(buf));
if ((uint32_t)0x8a9136aa != crc32c_value(buf, sizeof(buf))) {
printf("crc32c_value(zeros) test failed\n");
return 1;
}
memset(buf, 0xff, sizeof(buf));
if ((uint32_t)0x62a8ab43 != crc32c_value(buf, sizeof(buf))) {
printf("crc32c_value(0xff) test failed\n");
return 1;
}
for (size_t i = 0; i < 32; ++i)
buf[i] = (uint8_t)i;
if ((uint32_t)0x46dd794e != crc32c_value(buf, sizeof(buf))) {
printf("crc32c_value(0..31) test failed\n");
return 1;
}
for (size_t i = 0; i < 32; ++i)
buf[i] = (uint8_t)(31 - i);
if ((uint32_t)0x113fdb5c != crc32c_value(buf, sizeof(buf))) {
printf("crc32c_value(31..0) test failed\n");
return 1;
}
uint8_t data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
if ((uint32_t)0xd9963a56 != crc32c_value(data, sizeof(data))) {
printf("crc32c_value(data) test failed\n");
return 1;
}
const uint8_t* hello_space_world = (const uint8_t*)"hello world";
const uint8_t* hello_space = (const uint8_t*)"hello ";
const uint8_t* world = (const uint8_t*)"world";
if (crc32c_value(hello_space_world, 11) !=
crc32c_extend(crc32c_value(hello_space, 6), world, 5)) {
printf("crc32c_extend test failed\n");
return 1;
}
printf("All tests passed\n");
return 0;
}


@ -0,0 +1,112 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_EXTEND_UNITTESTS_H_
#define CRC32C_CRC32C_EXTEND_UNITTESTS_H_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "gtest/gtest.h"
// Common test cases for all implementations of CRC32C_Extend().
namespace crc32c {
template<typename TestTraits>
class ExtendTest : public testing::Test {};
TYPED_TEST_SUITE_P(ExtendTest);
TYPED_TEST_P(ExtendTest, StandardResults) {
// From rfc3720 section B.4.
uint8_t buf[32];
std::memset(buf, 0, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
TypeParam::Extend(0, buf, sizeof(buf)));
std::memset(buf, 0xff, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
TypeParam::Extend(0, buf, sizeof(buf)));
for (int i = 0; i < 32; ++i)
buf[i] = static_cast<uint8_t>(i);
EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
TypeParam::Extend(0, buf, sizeof(buf)));
for (int i = 0; i < 32; ++i)
buf[i] = static_cast<uint8_t>(31 - i);
EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
TypeParam::Extend(0, buf, sizeof(buf)));
uint8_t data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
EXPECT_EQ(static_cast<uint32_t>(0xd9963a56),
TypeParam::Extend(0, data, sizeof(data)));
}
TYPED_TEST_P(ExtendTest, HelloWorld) {
const uint8_t* hello_space_world =
reinterpret_cast<const uint8_t*>("hello world");
const uint8_t* hello_space = reinterpret_cast<const uint8_t*>("hello ");
const uint8_t* world = reinterpret_cast<const uint8_t*>("world");
EXPECT_EQ(TypeParam::Extend(0, hello_space_world, 11),
TypeParam::Extend(TypeParam::Extend(0, hello_space, 6), world, 5));
}
TYPED_TEST_P(ExtendTest, BufferSlicing) {
uint8_t buffer[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
for (size_t i = 0; i < 48; ++i) {
for (size_t j = i + 1; j <= 48; ++j) {
uint32_t crc = 0;
if (i > 0) crc = TypeParam::Extend(crc, buffer, i);
crc = TypeParam::Extend(crc, buffer + i, j - i);
if (j < 48) crc = TypeParam::Extend(crc, buffer + j, 48 - j);
EXPECT_EQ(static_cast<uint32_t>(0xd9963a56), crc);
}
}
}
TYPED_TEST_P(ExtendTest, LargeBufferSlicing) {
uint8_t buffer[2048];
for (size_t i = 0; i < 2048; i++)
buffer[i] = static_cast<uint8_t>(3 * i * i + 7 * i + 11);
for (size_t i = 0; i < 2048; ++i) {
for (size_t j = i + 1; j <= 2048; ++j) {
uint32_t crc = 0;
if (i > 0) crc = TypeParam::Extend(crc, buffer, i);
crc = TypeParam::Extend(crc, buffer + i, j - i);
if (j < 2048) crc = TypeParam::Extend(crc, buffer + j, 2048 - j);
EXPECT_EQ(static_cast<uint32_t>(0x36dcc753), crc);
}
}
}
REGISTER_TYPED_TEST_SUITE_P(ExtendTest,
StandardResults,
HelloWorld,
BufferSlicing,
LargeBufferSlicing);
} // namespace crc32c
#endif // CRC32C_CRC32C_EXTEND_UNITTESTS_H_


@ -0,0 +1,23 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_INTERNAL_H_
#define CRC32C_CRC32C_INTERNAL_H_
// Internal functions that may change between releases.
#include <cstddef>
#include <cstdint>
namespace crc32c {
// Un-accelerated implementation that works on all CPUs.
uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t count);
// CRCs are pre- and post-conditioned by xoring with all ones.
static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
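// Every implementation computes over (crc ^ kCRC32Xor) and xors with
// kCRC32Xor again before returning, so the post-conditioning applied by one
// Extend() call is undone by the pre-conditioning of the next; this is what
// makes piecewise extension
// (Extend(Extend(0, a, n), b, m) == Extend(0, ab, n + m)) work.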
} // namespace crc32c
#endif // CRC32C_CRC32C_INTERNAL_H_


@ -0,0 +1,351 @@
// Copyright 2008 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "./crc32c_internal.h"
#include <cstddef>
#include <cstdint>
#include "./crc32c_prefetch.h"
#include "./crc32c_read_le.h"
#include "./crc32c_round_up.h"
namespace {
const uint32_t kByteExtensionTable[256] = {
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
const uint32_t kStrideExtensionTable0[256] = {
0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
const uint32_t kStrideExtensionTable1[256] = {
0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
const uint32_t kStrideExtensionTable2[256] = {
0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
const uint32_t kStrideExtensionTable3[256] = {
0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
constexpr const ptrdiff_t kPrefetchHorizon = 256;
} // namespace
namespace crc32c {
uint32_t ExtendPortable(uint32_t crc, const uint8_t* data, size_t size) {
const uint8_t* p = data;
const uint8_t* e = p + size;
uint32_t l = crc ^ kCRC32Xor;
// Process one byte at a time.
#define STEP1 \
do { \
int c = (l & 0xff) ^ *p++; \
l = kByteExtensionTable[c] ^ (l >> 8); \
} while (0)
// Process one of the 4 strides of 4-byte data.
#define STEP4(s) \
do { \
crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
kStrideExtensionTable0[crc##s >> 24]; \
} while (0)
// Process a 16-byte swath of 4 strides, each of which has 4 bytes of data.
#define STEP16 \
do { \
STEP4(0); \
STEP4(1); \
STEP4(2); \
STEP4(3); \
p += 16; \
} while (0)
// Process 4 bytes that were already loaded into a word.
#define STEP4W(w) \
do { \
w ^= l; \
for (size_t i = 0; i < 4; ++i) { \
w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
} \
l = w; \
} while (0)
// Point x at first 4-byte aligned byte in the buffer. This might be past the
// end of the buffer.
const uint8_t* x = RoundUp<4>(p);
if (x <= e) {
// Process bytes until p is 4-byte aligned.
while (p != x) {
STEP1;
}
}
if ((e - p) >= 16) {
// Load a 16-byte swath into the stride partial results.
uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
uint32_t crc1 = ReadUint32LE(p + 1 * 4);
uint32_t crc2 = ReadUint32LE(p + 2 * 4);
uint32_t crc3 = ReadUint32LE(p + 3 * 4);
p += 16;
while ((e - p) > kPrefetchHorizon) {
RequestPrefetch(p + kPrefetchHorizon);
// Process 64 bytes at a time.
STEP16;
STEP16;
STEP16;
STEP16;
}
// Process one 16-byte swath at a time.
while ((e - p) >= 16) {
STEP16;
}
// Advance one word at a time as far as possible.
while ((e - p) >= 4) {
STEP4(0);
uint32_t tmp = crc0;
crc0 = crc1;
crc1 = crc2;
crc2 = crc3;
crc3 = tmp;
p += 4;
}
// Combine the 4 partial stride results.
l = 0;
STEP4W(crc0);
STEP4W(crc1);
STEP4W(crc2);
STEP4W(crc3);
}
// Process the last few bytes.
while (p != e) {
STEP1;
}
#undef STEP4W
#undef STEP16
#undef STEP4
#undef STEP1
return l ^ kCRC32Xor;
}
} // namespace crc32c
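For reference, kByteExtensionTable above is the usual byte-indexed lookup table for the reflected CRC32C (Castagnoli) polynomial 0x82f63b78, and STEP1 is the classic table-driven byte update. A bit-at-a-time equivalent is sketched below (a hypothetical helper, not part of the commit); it is handy for regenerating or spot-checking the tables, since seeding the inner loop with a byte value b reproduces kByteExtensionTable[b].

#include <cstddef>
#include <cstdint>

// Computes the same CRC32C as ExtendPortable(0, data, count), one bit at a
// time instead of through lookup tables.
uint32_t Crc32cBitwise(const uint8_t* data, size_t count) {
  uint32_t crc = 0xffffffffu;  // Pre-conditioning (kCRC32Xor).
  for (size_t i = 0; i < count; ++i) {
    crc ^= data[i];
    for (int bit = 0; bit < 8; ++bit) {
      // 0x82f63b78 is the bit-reflected Castagnoli polynomial.
      crc = (crc >> 1) ^ (0x82f63b78u & (0u - (crc & 1u)));
    }
  }
  return crc ^ 0xffffffffu;  // Post-conditioning (kCRC32Xor).
}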


@ -0,0 +1,20 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "./crc32c_extend_unittests.h"
#include "./crc32c_internal.h"
namespace crc32c {
struct PortableTestTraits {
static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
return ExtendPortable(crc, data, count);
}
};
INSTANTIATE_TYPED_TEST_SUITE_P(Portable, ExtendTest, PortableTestTraits);
} // namespace crc32c


@ -0,0 +1,44 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_PREFETCH_H_
#define CRC32C_CRC32C_PREFETCH_H_
#include <cstddef>
#include <cstdint>
#include "crc32c/crc32c_config.h"
#if HAVE_MM_PREFETCH
#if defined(_MSC_VER)
#include <intrin.h>
#else // !defined(_MSC_VER)
#include <xmmintrin.h>
#endif // defined(_MSC_VER)
#endif // HAVE_MM_PREFETCH
namespace crc32c {
// Ask the hardware to prefetch the data at the given address into the L1 cache.
inline void RequestPrefetch(const uint8_t* address) {
#if HAVE_BUILTIN_PREFETCH
// Clang and GCC implement the __builtin_prefetch non-standard extension,
// which maps to the best instruction on the target architecture.
__builtin_prefetch(reinterpret_cast<const char*>(address), 0 /* Read only. */,
0 /* No temporal locality. */);
#elif HAVE_MM_PREFETCH
// Visual Studio doesn't implement __builtin_prefetch, but exposes the
// PREFETCHNTA instruction via the _mm_prefetch intrinsic.
_mm_prefetch(reinterpret_cast<const char*>(address), _MM_HINT_NTA);
#else
// No prefetch support. Silence compiler warnings.
(void)address;
#endif // HAVE_BUILTIN_PREFETCH
}
} // namespace crc32c
#endif // CRC32C_CRC32C_PREFETCH_H_


@ -0,0 +1,9 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "./crc32c_prefetch.h"
// There is no easy way to test cache prefetching. We can only test that the
// crc32c_prefetch.h header compiles on its own, so it doesn't have any unstated
// dependencies.


@ -0,0 +1,51 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_READ_LE_H_
#define CRC32C_CRC32C_READ_LE_H_
#include <cstdint>
#include <cstring>
#include "crc32c/crc32c_config.h"
namespace crc32c {
// Reads a little-endian 32-bit integer from a 32-bit-aligned buffer.
inline uint32_t ReadUint32LE(const uint8_t* buffer) {
#if BYTE_ORDER_BIG_ENDIAN
return ((static_cast<uint32_t>(static_cast<uint8_t>(buffer[0]))) |
(static_cast<uint32_t>(static_cast<uint8_t>(buffer[1])) << 8) |
(static_cast<uint32_t>(static_cast<uint8_t>(buffer[2])) << 16) |
(static_cast<uint32_t>(static_cast<uint8_t>(buffer[3])) << 24));
#else // !BYTE_ORDER_BIG_ENDIAN
uint32_t result;
// This should be optimized to a single instruction.
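// std::memcpy (rather than a pointer cast) also keeps the load well-defined
// under C++ aliasing rules.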
std::memcpy(&result, buffer, sizeof(result));
return result;
#endif // BYTE_ORDER_BIG_ENDIAN
}
// Reads a little-endian 64-bit integer from a 64-bit-aligned buffer.
inline uint64_t ReadUint64LE(const uint8_t* buffer) {
#if BYTE_ORDER_BIG_ENDIAN
return ((static_cast<uint64_t>(static_cast<uint8_t>(buffer[0]))) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[1])) << 8) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[2])) << 16) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[3])) << 24) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[4])) << 32) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[5])) << 40) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[6])) << 48) |
(static_cast<uint64_t>(static_cast<uint8_t>(buffer[7])) << 56));
#else // !BYTE_ORDER_BIG_ENDIAN
uint64_t result;
// This should be optimized to a single instruction.
std::memcpy(&result, buffer, sizeof(result));
return result;
#endif // BYTE_ORDER_BIG_ENDIAN
}
} // namespace crc32c
#endif // CRC32C_CRC32C_READ_LE_H_


@ -0,0 +1,32 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "./crc32c_read_le.h"
#include <cstddef>
#include <cstdint>
#include "gtest/gtest.h"
#include "./crc32c_round_up.h"
namespace crc32c {
TEST(Crc32CReadLETest, ReadUint32LE) {
// little-endian 0x12345678
alignas(4) uint8_t bytes[] = {0x78, 0x56, 0x34, 0x12};
ASSERT_EQ(RoundUp<4>(bytes), bytes) << "Stack array is not aligned";
EXPECT_EQ(static_cast<uint32_t>(0x12345678), ReadUint32LE(bytes));
}
TEST(Crc32CReadLETest, ReadUint64LE) {
// little-endian 0x123456789ABCDEF0
alignas(8) uint8_t bytes[] = {0xF0, 0xDE, 0xBC, 0x9A, 0x78, 0x56, 0x34, 0x12};
ASSERT_EQ(RoundUp<8>(bytes), bytes) << "Stack array is not aligned";
EXPECT_EQ(static_cast<uint64_t>(0x123456789ABCDEF0), ReadUint64LE(bytes));
}
} // namespace crc32c


@ -0,0 +1,34 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_ROUND_UP_H_
#define CRC32C_CRC32C_ROUND_UP_H_
#include <cstddef>
#include <cstdint>
namespace crc32c {
// Returns the smallest number >= the given number that is evenly divisible by N.
//
// N must be a power of two.
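// For example, RoundUp<8>(13) == (13 + 7) & ~7 == 16, and RoundUp<8>(16) == 16.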
template <int N>
constexpr inline uintptr_t RoundUp(uintptr_t pointer) {
static_assert((N & (N - 1)) == 0, "N must be a power of two");
return (pointer + (N - 1)) & ~(N - 1);
}
// Returns the smallest address >= the given address that is aligned to N bytes.
//
// N must be a power of two.
template <int N>
constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
static_assert((N & (N - 1)) == 0, "N must be a power of two");
return reinterpret_cast<uint8_t*>(
RoundUp<N>(reinterpret_cast<uintptr_t>(pointer)));
}
} // namespace crc32c
#endif // CRC32C_CRC32C_ROUND_UP_H_


@ -0,0 +1,84 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "./crc32c_round_up.h"
#include <cstddef>
#include <cstdint>
#include "gtest/gtest.h"
namespace crc32c {
TEST(CRC32CRoundUpTest, RoundUpUintptr) {
uintptr_t zero = 0;
ASSERT_EQ(zero, RoundUp<1>(zero));
ASSERT_EQ(1U, RoundUp<1>(1U));
ASSERT_EQ(2U, RoundUp<1>(2U));
ASSERT_EQ(3U, RoundUp<1>(3U));
ASSERT_EQ(~static_cast<uintptr_t>(0), RoundUp<1>(~static_cast<uintptr_t>(0)));
ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<1>(~static_cast<uintptr_t>(1)));
ASSERT_EQ(~static_cast<uintptr_t>(2), RoundUp<1>(~static_cast<uintptr_t>(2)));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<1>(~static_cast<uintptr_t>(3)));
ASSERT_EQ(zero, RoundUp<2>(zero));
ASSERT_EQ(2U, RoundUp<2>(1U));
ASSERT_EQ(2U, RoundUp<2>(2U));
ASSERT_EQ(4U, RoundUp<2>(3U));
ASSERT_EQ(4U, RoundUp<2>(4U));
ASSERT_EQ(6U, RoundUp<2>(5U));
ASSERT_EQ(6U, RoundUp<2>(6U));
ASSERT_EQ(8U, RoundUp<2>(7U));
ASSERT_EQ(8U, RoundUp<2>(8U));
ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(1)));
ASSERT_EQ(~static_cast<uintptr_t>(1), RoundUp<2>(~static_cast<uintptr_t>(2)));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(3)));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<2>(~static_cast<uintptr_t>(4)));
ASSERT_EQ(zero, RoundUp<4>(zero));
ASSERT_EQ(4U, RoundUp<4>(1U));
ASSERT_EQ(4U, RoundUp<4>(2U));
ASSERT_EQ(4U, RoundUp<4>(3U));
ASSERT_EQ(4U, RoundUp<4>(4U));
ASSERT_EQ(8U, RoundUp<4>(5U));
ASSERT_EQ(8U, RoundUp<4>(6U));
ASSERT_EQ(8U, RoundUp<4>(7U));
ASSERT_EQ(8U, RoundUp<4>(8U));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(3)));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(4)));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(5)));
ASSERT_EQ(~static_cast<uintptr_t>(3), RoundUp<4>(~static_cast<uintptr_t>(6)));
ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(7)));
ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(8)));
ASSERT_EQ(~static_cast<uintptr_t>(7), RoundUp<4>(~static_cast<uintptr_t>(9)));
}
TEST(CRC32CRoundUpTest, RoundUpPointer) {
uintptr_t zero = 0, three = 3, four = 4, seven = 7, eight = 8;
const uint8_t* zero_ptr = reinterpret_cast<const uint8_t*>(zero);
const uint8_t* three_ptr = reinterpret_cast<const uint8_t*>(three);
const uint8_t* four_ptr = reinterpret_cast<const uint8_t*>(four);
const uint8_t* seven_ptr = reinterpret_cast<const uint8_t*>(seven);
const uint8_t* eight_ptr = reinterpret_cast<const uint8_t*>(eight);
ASSERT_EQ(zero_ptr, RoundUp<1>(zero_ptr));
ASSERT_EQ(zero_ptr, RoundUp<4>(zero_ptr));
ASSERT_EQ(zero_ptr, RoundUp<8>(zero_ptr));
ASSERT_EQ(three_ptr, RoundUp<1>(three_ptr));
ASSERT_EQ(four_ptr, RoundUp<4>(three_ptr));
ASSERT_EQ(eight_ptr, RoundUp<8>(three_ptr));
ASSERT_EQ(four_ptr, RoundUp<1>(four_ptr));
ASSERT_EQ(four_ptr, RoundUp<4>(four_ptr));
ASSERT_EQ(eight_ptr, RoundUp<8>(four_ptr));
ASSERT_EQ(seven_ptr, RoundUp<1>(seven_ptr));
ASSERT_EQ(eight_ptr, RoundUp<4>(seven_ptr));
ASSERT_EQ(eight_ptr, RoundUp<8>(seven_ptr));
}
} // namespace crc32c


@ -0,0 +1,256 @@
// Copyright 2008 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "./crc32c_sse42.h"
// In a separate source file to allow this accelerated CRC32C function to be
// compiled with the appropriate compiler flags to enable SSE4.2 instructions.
// This implementation is loosely based on Intel Pub 323405 from April 2011,
// "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction".
#include <cstddef>
#include <cstdint>
#include "./crc32c_internal.h"
#include "./crc32c_prefetch.h"
#include "./crc32c_read_le.h"
#include "./crc32c_round_up.h"
#include "crc32c/crc32c_config.h"
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
#if defined(_MSC_VER)
#include <intrin.h>
#else // !defined(_MSC_VER)
#include <nmmintrin.h>
#endif // defined(_MSC_VER)
namespace crc32c {
namespace {
constexpr const ptrdiff_t kGroups = 3;
constexpr const ptrdiff_t kBlock0Size = 16 * 1024 / kGroups / 64 * 64;
constexpr const ptrdiff_t kBlock1Size = 4 * 1024 / kGroups / 8 * 8;
constexpr const ptrdiff_t kBlock2Size = 1024 / kGroups / 8 * 8;
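// The outer loops below consume kGroups blocks per pass, so these sizes carve
// roughly 16 KiB, 4 KiB and 1 KiB of input into kGroups parallel streams,
// rounded down to a multiple of 64 (respectively 8) bytes.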
const uint32_t kBlock0SkipTable[8][16] = {
{0x00000000, 0xff770459, 0xfb027e43, 0x04757a1a, 0xf3e88a77, 0x0c9f8e2e,
0x08eaf434, 0xf79df06d, 0xe23d621f, 0x1d4a6646, 0x193f1c5c, 0xe6481805,
0x11d5e868, 0xeea2ec31, 0xead7962b, 0x15a09272},
{0x00000000, 0xc196b2cf, 0x86c1136f, 0x4757a1a0, 0x086e502f, 0xc9f8e2e0,
0x8eaf4340, 0x4f39f18f, 0x10dca05e, 0xd14a1291, 0x961db331, 0x578b01fe,
0x18b2f071, 0xd92442be, 0x9e73e31e, 0x5fe551d1},
{0x00000000, 0x21b940bc, 0x43728178, 0x62cbc1c4, 0x86e502f0, 0xa75c424c,
0xc5978388, 0xe42ec334, 0x08267311, 0x299f33ad, 0x4b54f269, 0x6aedb2d5,
0x8ec371e1, 0xaf7a315d, 0xcdb1f099, 0xec08b025},
{0x00000000, 0x104ce622, 0x2099cc44, 0x30d52a66, 0x41339888, 0x517f7eaa,
0x61aa54cc, 0x71e6b2ee, 0x82673110, 0x922bd732, 0xa2fefd54, 0xb2b21b76,
0xc354a998, 0xd3184fba, 0xe3cd65dc, 0xf38183fe},
{0x00000000, 0x012214d1, 0x024429a2, 0x03663d73, 0x04885344, 0x05aa4795,
0x06cc7ae6, 0x07ee6e37, 0x0910a688, 0x0832b259, 0x0b548f2a, 0x0a769bfb,
0x0d98f5cc, 0x0cbae11d, 0x0fdcdc6e, 0x0efec8bf},
{0x00000000, 0x12214d10, 0x24429a20, 0x3663d730, 0x48853440, 0x5aa47950,
0x6cc7ae60, 0x7ee6e370, 0x910a6880, 0x832b2590, 0xb548f2a0, 0xa769bfb0,
0xd98f5cc0, 0xcbae11d0, 0xfdcdc6e0, 0xefec8bf0},
{0x00000000, 0x27f8a7f1, 0x4ff14fe2, 0x6809e813, 0x9fe29fc4, 0xb81a3835,
0xd013d026, 0xf7eb77d7, 0x3a294979, 0x1dd1ee88, 0x75d8069b, 0x5220a16a,
0xa5cbd6bd, 0x8233714c, 0xea3a995f, 0xcdc23eae},
{0x00000000, 0x745292f2, 0xe8a525e4, 0x9cf7b716, 0xd4a63d39, 0xa0f4afcb,
0x3c0318dd, 0x48518a2f, 0xaca00c83, 0xd8f29e71, 0x44052967, 0x3057bb95,
0x780631ba, 0x0c54a348, 0x90a3145e, 0xe4f186ac},
};
const uint32_t kBlock1SkipTable[8][16] = {
{0x00000000, 0x79113270, 0xf22264e0, 0x8b335690, 0xe1a8bf31, 0x98b98d41,
0x138adbd1, 0x6a9be9a1, 0xc6bd0893, 0xbfac3ae3, 0x349f6c73, 0x4d8e5e03,
0x2715b7a2, 0x5e0485d2, 0xd537d342, 0xac26e132},
{0x00000000, 0x889667d7, 0x14c0b95f, 0x9c56de88, 0x298172be, 0xa1171569,
0x3d41cbe1, 0xb5d7ac36, 0x5302e57c, 0xdb9482ab, 0x47c25c23, 0xcf543bf4,
0x7a8397c2, 0xf215f015, 0x6e432e9d, 0xe6d5494a},
{0x00000000, 0xa605caf8, 0x49e7e301, 0xefe229f9, 0x93cfc602, 0x35ca0cfa,
0xda282503, 0x7c2deffb, 0x2273faf5, 0x8476300d, 0x6b9419f4, 0xcd91d30c,
0xb1bc3cf7, 0x17b9f60f, 0xf85bdff6, 0x5e5e150e},
{0x00000000, 0x44e7f5ea, 0x89cfebd4, 0xcd281e3e, 0x1673a159, 0x529454b3,
0x9fbc4a8d, 0xdb5bbf67, 0x2ce742b2, 0x6800b758, 0xa528a966, 0xe1cf5c8c,
0x3a94e3eb, 0x7e731601, 0xb35b083f, 0xf7bcfdd5},
{0x00000000, 0x59ce8564, 0xb39d0ac8, 0xea538fac, 0x62d66361, 0x3b18e605,
0xd14b69a9, 0x8885eccd, 0xc5acc6c2, 0x9c6243a6, 0x7631cc0a, 0x2fff496e,
0xa77aa5a3, 0xfeb420c7, 0x14e7af6b, 0x4d292a0f},
{0x00000000, 0x8eb5fb75, 0x1887801b, 0x96327b6e, 0x310f0036, 0xbfbafb43,
0x2988802d, 0xa73d7b58, 0x621e006c, 0xecabfb19, 0x7a998077, 0xf42c7b02,
0x5311005a, 0xdda4fb2f, 0x4b968041, 0xc5237b34},
{0x00000000, 0xc43c00d8, 0x8d947741, 0x49a87799, 0x1ec49873, 0xdaf898ab,
0x9350ef32, 0x576cefea, 0x3d8930e6, 0xf9b5303e, 0xb01d47a7, 0x7421477f,
0x234da895, 0xe771a84d, 0xaed9dfd4, 0x6ae5df0c},
{0x00000000, 0x7b1261cc, 0xf624c398, 0x8d36a254, 0xe9a5f1c1, 0x92b7900d,
0x1f813259, 0x64935395, 0xd6a79573, 0xadb5f4bf, 0x208356eb, 0x5b913727,
0x3f0264b2, 0x4410057e, 0xc926a72a, 0xb234c6e6},
};
const uint32_t kBlock2SkipTable[8][16] = {
{0x00000000, 0x8f158014, 0x1bc776d9, 0x94d2f6cd, 0x378eedb2, 0xb89b6da6,
0x2c499b6b, 0xa35c1b7f, 0x6f1ddb64, 0xe0085b70, 0x74daadbd, 0xfbcf2da9,
0x589336d6, 0xd786b6c2, 0x4354400f, 0xcc41c01b},
{0x00000000, 0xde3bb6c8, 0xb99b1b61, 0x67a0ada9, 0x76da4033, 0xa8e1f6fb,
0xcf415b52, 0x117aed9a, 0xedb48066, 0x338f36ae, 0x542f9b07, 0x8a142dcf,
0x9b6ec055, 0x4555769d, 0x22f5db34, 0xfcce6dfc},
{0x00000000, 0xde85763d, 0xb8e69a8b, 0x6663ecb6, 0x742143e7, 0xaaa435da,
0xccc7d96c, 0x1242af51, 0xe84287ce, 0x36c7f1f3, 0x50a41d45, 0x8e216b78,
0x9c63c429, 0x42e6b214, 0x24855ea2, 0xfa00289f},
{0x00000000, 0xd569796d, 0xaf3e842b, 0x7a57fd46, 0x5b917ea7, 0x8ef807ca,
0xf4affa8c, 0x21c683e1, 0xb722fd4e, 0x624b8423, 0x181c7965, 0xcd750008,
0xecb383e9, 0x39dafa84, 0x438d07c2, 0x96e47eaf},
{0x00000000, 0x6ba98c6d, 0xd75318da, 0xbcfa94b7, 0xab4a4745, 0xc0e3cb28,
0x7c195f9f, 0x17b0d3f2, 0x5378f87b, 0x38d17416, 0x842be0a1, 0xef826ccc,
0xf832bf3e, 0x939b3353, 0x2f61a7e4, 0x44c82b89},
{0x00000000, 0xa6f1f0f6, 0x480f971d, 0xeefe67eb, 0x901f2e3a, 0x36eedecc,
0xd810b927, 0x7ee149d1, 0x25d22a85, 0x8323da73, 0x6dddbd98, 0xcb2c4d6e,
0xb5cd04bf, 0x133cf449, 0xfdc293a2, 0x5b336354},
{0x00000000, 0x4ba4550a, 0x9748aa14, 0xdcecff1e, 0x2b7d22d9, 0x60d977d3,
0xbc3588cd, 0xf791ddc7, 0x56fa45b2, 0x1d5e10b8, 0xc1b2efa6, 0x8a16baac,
0x7d87676b, 0x36233261, 0xeacfcd7f, 0xa16b9875},
{0x00000000, 0xadf48b64, 0x5e056039, 0xf3f1eb5d, 0xbc0ac072, 0x11fe4b16,
0xe20fa04b, 0x4ffb2b2f, 0x7df9f615, 0xd00d7d71, 0x23fc962c, 0x8e081d48,
0xc1f33667, 0x6c07bd03, 0x9ff6565e, 0x3202dd3a},
};
constexpr const ptrdiff_t kPrefetchHorizon = 256;
} // namespace
uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t size) {
const uint8_t* p = data;
const uint8_t* e = data + size;
uint32_t l = crc ^ kCRC32Xor;
#define STEP1 \
do { \
l = _mm_crc32_u8(l, *p++); \
} while (0)
#define STEP4(crc) \
do { \
crc = _mm_crc32_u32(crc, ReadUint32LE(p)); \
p += 4; \
} while (0)
#define STEP8(crc, data) \
do { \
crc = _mm_crc32_u64(crc, ReadUint64LE(data)); \
data += 8; \
} while (0)
#define STEP8BY3(crc0, crc1, crc2, p0, p1, p2) \
do { \
STEP8(crc0, p0); \
STEP8(crc1, p1); \
STEP8(crc2, p2); \
} while (0)
#define STEP8X3(crc0, crc1, crc2, bs) \
do { \
crc0 = _mm_crc32_u64(crc0, ReadUint64LE(p)); \
crc1 = _mm_crc32_u64(crc1, ReadUint64LE(p + bs)); \
crc2 = _mm_crc32_u64(crc2, ReadUint64LE(p + 2 * bs)); \
p += 8; \
} while (0)
#define SKIP_BLOCK(crc, tab) \
do { \
crc = tab[0][crc & 0xf] ^ tab[1][(crc >> 4) & 0xf] ^ \
tab[2][(crc >> 8) & 0xf] ^ tab[3][(crc >> 12) & 0xf] ^ \
tab[4][(crc >> 16) & 0xf] ^ tab[5][(crc >> 20) & 0xf] ^ \
tab[6][(crc >> 24) & 0xf] ^ tab[7][(crc >> 28) & 0xf]; \
} while (0)
// Point x at first 8-byte aligned byte in the buffer. This might be past the
// end of the buffer.
const uint8_t* x = RoundUp<8>(p);
if (x <= e) {
// Process bytes until p is 8-byte aligned.
while (p != x) {
STEP1;
}
}
// Process the data in predetermined block sizes with tables for quickly
// combining the checksum. Experimentally it's better to use larger block
// sizes where possible so use a hierarchy of decreasing block sizes.
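// SKIP_BLOCK(crc, tab) advances |crc| as if a block's worth of zero bytes
// followed it (in effect a multiplication by x^(8 * block_size) modulo the
// CRC polynomial, done a nibble at a time), which is what allows the three
// independently computed group CRCs to be folded together with plain xors.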
uint64_t l64 = l;
while ((e - p) >= kGroups * kBlock0Size) {
uint64_t l641 = 0;
uint64_t l642 = 0;
for (int i = 0; i < kBlock0Size; i += 8 * 8) {
// Prefetch ahead to hide latency.
RequestPrefetch(p + kPrefetchHorizon);
RequestPrefetch(p + kBlock0Size + kPrefetchHorizon);
RequestPrefetch(p + 2 * kBlock0Size + kPrefetchHorizon);
// Process 64 bytes at a time.
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
STEP8X3(l64, l641, l642, kBlock0Size);
}
// Combine results.
SKIP_BLOCK(l64, kBlock0SkipTable);
l64 ^= l641;
SKIP_BLOCK(l64, kBlock0SkipTable);
l64 ^= l642;
p += (kGroups - 1) * kBlock0Size;
}
while ((e - p) >= kGroups * kBlock1Size) {
uint64_t l641 = 0;
uint64_t l642 = 0;
for (int i = 0; i < kBlock1Size; i += 8) {
STEP8X3(l64, l641, l642, kBlock1Size);
}
SKIP_BLOCK(l64, kBlock1SkipTable);
l64 ^= l641;
SKIP_BLOCK(l64, kBlock1SkipTable);
l64 ^= l642;
p += (kGroups - 1) * kBlock1Size;
}
while ((e - p) >= kGroups * kBlock2Size) {
uint64_t l641 = 0;
uint64_t l642 = 0;
for (int i = 0; i < kBlock2Size; i += 8) {
STEP8X3(l64, l641, l642, kBlock2Size);
}
SKIP_BLOCK(l64, kBlock2SkipTable);
l64 ^= l641;
SKIP_BLOCK(l64, kBlock2SkipTable);
l64 ^= l642;
p += (kGroups - 1) * kBlock2Size;
}
// Process bytes 16 at a time
while ((e - p) >= 16) {
STEP8(l64, p);
STEP8(l64, p);
}
l = static_cast<uint32_t>(l64);
// Process the last few bytes.
while (p != e) {
STEP1;
}
#undef SKIP_BLOCK
#undef STEP8X3
#undef STEP8BY3
#undef STEP8
#undef STEP4
#undef STEP1
return l ^ kCRC32Xor;
}
} // namespace crc32c
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))


@ -0,0 +1,31 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_SSE42_H_
#define CRC32C_CRC32C_SSE42_H_
// X86-specific code.
#include <cstddef>
#include <cstdint>
#include "crc32c/crc32c_config.h"
// The hardware-accelerated implementation is only enabled for 64-bit builds,
// because a straightforward 32-bit implementation actually runs slower than the
// portable version. Most X86 machines are 64-bit nowadays, so it doesn't make
// much sense to spend time building an optimized hardware-accelerated
// implementation.
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
namespace crc32c {
// SSE4.2-accelerated implementation in crc32c_sse42.cc
uint32_t ExtendSse42(uint32_t crc, const uint8_t* data, size_t count);
} // namespace crc32c
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
#endif // CRC32C_CRC32C_SSE42_H_


@ -0,0 +1,48 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_SSE42_CHECK_H_
#define CRC32C_CRC32C_SSE42_CHECK_H_
// X86-specific code checking the availability of SSE4.2 instructions.
#include <cstddef>
#include <cstdint>
#include "crc32c/crc32c_config.h"
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
// If the compiler supports SSE4.2, it definitely supports X86.
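// CPUID leaf 1 reports SSE4.2 support in bit 20 of ECX, which is what both
// checks below test.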
#if defined(_MSC_VER)
#include <intrin.h>
namespace crc32c {
inline bool CanUseSse42() {
int cpu_info[4];
__cpuid(cpu_info, 1);
return (cpu_info[2] & (1 << 20)) != 0;
}
} // namespace crc32c
#else // !defined(_MSC_VER)
#include <cpuid.h>
namespace crc32c {
inline bool CanUseSse42() {
unsigned int eax, ebx, ecx, edx;
return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && ((ecx & (1 << 20)) != 0);
}
} // namespace crc32c
#endif // defined(_MSC_VER)
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
#endif // CRC32C_CRC32C_SSE42_CHECK_H_


@ -0,0 +1,24 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "./crc32c_extend_unittests.h"
#include "./crc32c_sse42.h"
namespace crc32c {
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
struct Sse42TestTraits {
static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
return ExtendSse42(crc, data, count);
}
};
INSTANTIATE_TYPED_TEST_SUITE_P(Sse42, ExtendTest, Sse42TestTraits);
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
} // namespace crc32c


@ -0,0 +1,20 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "crc32c/crc32c_config.h"
#include "gtest/gtest.h"
#if CRC32C_TESTS_BUILT_WITH_GLOG
#include "glog/logging.h"
#endif // CRC32C_TESTS_BUILT_WITH_GLOG
int main(int argc, char** argv) {
#if CRC32C_TESTS_BUILT_WITH_GLOG
google::InitGoogleLogging(argv[0]);
google::InstallFailureSignalHandler();
#endif // CRC32C_TESTS_BUILT_WITH_GLOG
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View file

@@ -0,0 +1,129 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "crc32c/crc32c.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "gtest/gtest.h"
#include "./crc32c_extend_unittests.h"
TEST(Crc32CTest, Crc32c) {
// From rfc3720 section B.4.
uint8_t buf[32];
std::memset(buf, 0, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
crc32c::Crc32c(buf, sizeof(buf)));
std::memset(buf, 0xff, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
crc32c::Crc32c(buf, sizeof(buf)));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<uint8_t>(i);
EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
crc32c::Crc32c(buf, sizeof(buf)));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<uint8_t>(31 - i);
EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
crc32c::Crc32c(buf, sizeof(buf)));
uint8_t data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
EXPECT_EQ(static_cast<uint32_t>(0xd9963a56),
crc32c::Crc32c(data, sizeof(data)));
}
namespace crc32c {
struct ApiTestTraits {
static uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
return ::crc32c::Extend(crc, data, count);
}
};
INSTANTIATE_TYPED_TEST_SUITE_P(Api, ExtendTest, ApiTestTraits);
} // namespace crc32c
TEST(CRC32CTest, Crc32cCharPointer) {
char buf[32];
std::memset(buf, 0, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa),
crc32c::Crc32c(buf, sizeof(buf)));
std::memset(buf, 0xff, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43),
crc32c::Crc32c(buf, sizeof(buf)));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<char>(i);
EXPECT_EQ(static_cast<uint32_t>(0x46dd794e),
crc32c::Crc32c(buf, sizeof(buf)));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<char>(31 - i);
EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c),
crc32c::Crc32c(buf, sizeof(buf)));
}
TEST(CRC32CTest, Crc32cStdString) {
std::string buf;
buf.resize(32);
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<char>(0x00);
EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(buf));
for (size_t i = 0; i < 32; ++i)
buf[i] = '\xff';
EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(buf));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<char>(i);
EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(buf));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<char>(31 - i);
EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(buf));
}
#if __cplusplus > 201402L
#if __has_include(<string_view>)
TEST(CRC32CTest, Crc32cStdStringView) {
uint8_t buf[32];
std::string_view view(reinterpret_cast<const char*>(buf), sizeof(buf));
std::memset(buf, 0, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x8a9136aa), crc32c::Crc32c(view));
std::memset(buf, 0xff, sizeof(buf));
EXPECT_EQ(static_cast<uint32_t>(0x62a8ab43), crc32c::Crc32c(view));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<uint8_t>(i);
EXPECT_EQ(static_cast<uint32_t>(0x46dd794e), crc32c::Crc32c(view));
for (size_t i = 0; i < 32; ++i)
buf[i] = static_cast<uint8_t>(31 - i);
EXPECT_EQ(static_cast<uint32_t>(0x113fdb5c), crc32c::Crc32c(view));
}
#endif // __has_include(<string_view>)
#endif // __cplusplus > 201402L
#define TESTED_EXTEND Extend
#include "./crc32c_extend_unittests.h"
#undef TESTED_EXTEND

View file

@@ -0,0 +1,89 @@
/* Copyright 2017 The CRC32C Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. See the AUTHORS file for names of contributors. */
#ifndef CRC32C_CRC32C_H_
#define CRC32C_CRC32C_H_
/* The API exported by the CRC32C project. */
#if defined(__cplusplus)
#include <cstddef>
#include <cstdint>
#include <string>
#else /* !defined(__cplusplus) */
#include <stddef.h>
#include <stdint.h>
#endif /* !defined(__cplusplus) */
/* The C API. */
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
/* Extends "crc" with the CRC32C of "count" bytes in the buffer pointed by
"data" */
uint32_t crc32c_extend(uint32_t crc, const uint8_t* data, size_t count);
/* Computes the CRC32C of "count" bytes in the buffer pointed by "data". */
uint32_t crc32c_value(const uint8_t* data, size_t count);
#ifdef __cplusplus
} /* end extern "C" */
#endif /* defined(__cplusplus) */
/* The C++ API. */
#if defined(__cplusplus)
namespace crc32c {
// Extends "crc" with the CRC32C of "count" bytes in the buffer pointed by
// "data".
uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count);
// Computes the CRC32C of "count" bytes in the buffer pointed by "data".
inline uint32_t Crc32c(const uint8_t* data, size_t count) {
return Extend(0, data, count);
}
// Computes the CRC32C of "count" bytes in the buffer pointed by "data".
inline uint32_t Crc32c(const char* data, size_t count) {
return Extend(0, reinterpret_cast<const uint8_t*>(data), count);
}
// Computes the CRC32C of the string's content.
inline uint32_t Crc32c(const std::string& string) {
return Crc32c(reinterpret_cast<const uint8_t*>(string.data()),
string.size());
}
} // namespace crc32c
#if __cplusplus > 201402L
#if __has_include(<string_view>)
#include <string_view>
namespace crc32c {
// Computes the CRC32C of the bytes in the string_view.
inline uint32_t Crc32c(const std::string_view& string_view) {
return Crc32c(reinterpret_cast<const uint8_t*>(string_view.data()),
string_view.size());
}
} // namespace crc32c
#endif // __has_include(<string_view>)
#endif // __cplusplus > 201402L
#endif /* defined(__cplusplus) */
#endif // CRC32C_CRC32C_H_
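A minimal usage sketch of the API declared above, assuming the library is linked and the header is on the include path: it computes the CRC32C of a buffer in one shot and again in two chunks via Extend(), which yields the same value.

#include <cstdint>
#include <cstdio>
#include <string>

#include "crc32c/crc32c.h"

int main() {
  const std::string payload = "hello crc32c";

  // One-shot CRC32C of the whole buffer.
  uint32_t whole = crc32c::Crc32c(payload);

  // Incremental: seed with the CRC of the first chunk, then Extend() over the
  // rest. The result matches the one-shot computation.
  uint32_t crc = crc32c::Crc32c(payload.data(), 5);
  crc = crc32c::Extend(crc,
                       reinterpret_cast<const uint8_t*>(payload.data()) + 5,
                       payload.size() - 5);

  std::printf("one-shot=%08x chunked=%08x\n", static_cast<unsigned>(whole),
              static_cast<unsigned>(crc));
  return whole == crc ? 0 : 1;
}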

View file

@@ -0,0 +1,43 @@
// Copyright 2017 The CRC32C Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef CRC32C_CRC32C_CONFIG_H_
#define CRC32C_CRC32C_CONFIG_H_
// Define to 1 if building for a big-endian platform.
//#cmakedefine01 BYTE_ORDER_BIG_ENDIAN
// Define to 1 if the compiler has the __builtin_prefetch intrinsic.
#define HAVE_BUILTIN_PREFETCH 1
// Define to 1 if targeting X86 and the compiler has the _mm_prefetch intrinsic.
#if HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
#define HAVE_MM_PREFETCH 1
#endif
// Define to 1 if targeting X86 and the compiler has the _mm_crc32_u{8,32,64}
// intrinsics.
#if defined(__i386) || defined(__x86_64) || defined(_M_IX86)
//#define HAVE_SSE42 1
#endif
// Define to 1 if targeting ARM and the compiler has the __crc32c{b,h,w,d} and
// the vmull_p64 intrinsics.
#if defined(__aarch64__)
//#define HAVE_ARM64_CRC32C 1
#endif
// Define to 1 if the system libraries have the getauxval function in the
// <sys/auxv.h> header. Should be true on Linux and Android API level 20+.
#define HAVE_STRONG_GETAUXVAL 1
// Define to 1 if the compiler supports defining getauxval as a weak symbol.
// Should be true for any compiler that supports __attribute__((weak)).
#define HAVE_WEAK_GETAUXVAL 1
// Define to 1 if CRC32C tests have been built with Google Logging.
//#cmakedefine01 CRC32C_TESTS_BUILT_WITH_GLOG
#endif // CRC32C_CRC32C_CONFIG_H_

View file

@@ -0,0 +1,27 @@
Copyright 2023 The Chromium Authors
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -0,0 +1,2 @@
agrieve@chromium.org
smaier@chromium.org

View file

@@ -0,0 +1,12 @@
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Chromium only applies this check when ENABLE_DCHECK=false.
-checkdiscard @org.jni_zero.CheckDiscard class ** {
*;
}
-checkdiscard class ** {
@org.jni_zero.CheckDiscard *;
}

View file

@@ -0,0 +1,255 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/jni_zero/core.h"
#include <sys/prctl.h>
#include "third_party/jni_zero/logging.h"
namespace jni_zero {
namespace {
// Until we fully migrate base's jni_android, we will maintain a copy of this
// global here and will have base set this variable when it sets its own.
JavaVM* g_jvm = nullptr;
jclass (*g_class_resolver)(JNIEnv*, const char*, const char*) = nullptr;
void (*g_exception_handler_callback)(JNIEnv*) = nullptr;
ScopedJavaLocalRef<jclass> GetClassInternal(JNIEnv* env,
const char* class_name,
const char* split_name) {
jclass clazz;
if (g_class_resolver != nullptr) {
clazz = g_class_resolver(env, class_name, split_name);
} else {
clazz = env->FindClass(class_name);
}
if (ClearException(env) || !clazz) {
JNI_ZERO_FLOG("Failed to find class %s", class_name);
}
return ScopedJavaLocalRef<jclass>(env, clazz);
}
} // namespace
JNIEnv* AttachCurrentThread() {
JNI_ZERO_DCHECK(g_jvm);
JNIEnv* env = nullptr;
jint ret = g_jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_2);
if (ret == JNI_EDETACHED || !env) {
JavaVMAttachArgs args;
args.version = JNI_VERSION_1_2;
args.group = nullptr;
// 16 is the maximum size for thread names on Android.
char thread_name[16];
int err = prctl(PR_GET_NAME, thread_name);
if (err < 0) {
JNI_ZERO_ELOG("prctl(PR_GET_NAME)");
args.name = nullptr;
} else {
args.name = thread_name;
}
#if defined(JNI_ZERO_IS_ROBOLECTRIC)
ret = g_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), &args);
#else
ret = g_jvm->AttachCurrentThread(&env, &args);
#endif
JNI_ZERO_CHECK(ret == JNI_OK);
}
return env;
}
JNIEnv* AttachCurrentThreadWithName(const std::string& thread_name) {
JNI_ZERO_DCHECK(g_jvm);
JavaVMAttachArgs args;
args.version = JNI_VERSION_1_2;
args.name = const_cast<char*>(thread_name.c_str());
args.group = nullptr;
JNIEnv* env = nullptr;
#if defined(JNI_ZERO_IS_ROBOLECTRIC)
jint ret = g_jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), &args);
#else
jint ret = g_jvm->AttachCurrentThread(&env, &args);
#endif
JNI_ZERO_CHECK(ret == JNI_OK);
return env;
}
void DetachFromVM() {
  // Ignore the return value: if the thread is not attached, DetachCurrentThread()
  // will fail, but that is fine because the native thread may never have been
  // attached.
if (g_jvm) {
g_jvm->DetachCurrentThread();
}
}
void InitVM(JavaVM* vm) {
g_jvm = vm;
}
void DisableJvmForTesting() {
g_jvm = nullptr;
}
bool IsVMInitialized() {
return g_jvm != nullptr;
}
JavaVM* GetVM() {
return g_jvm;
}
bool HasException(JNIEnv* env) {
return env->ExceptionCheck() != JNI_FALSE;
}
bool ClearException(JNIEnv* env) {
if (!HasException(env)) {
return false;
}
env->ExceptionDescribe();
env->ExceptionClear();
return true;
}
void SetExceptionHandler(void (*callback)(JNIEnv*)) {
g_exception_handler_callback = callback;
}
void CheckException(JNIEnv* env) {
if (!HasException(env)) {
return;
}
if (g_exception_handler_callback) {
return g_exception_handler_callback(env);
}
JNI_ZERO_FLOG("jni_zero crashing due to uncaught Java exception");
}
void SetClassResolver(jclass (*resolver)(JNIEnv*, const char*, const char*)) {
g_class_resolver = resolver;
}
ScopedJavaLocalRef<jclass> GetClass(JNIEnv* env,
const char* class_name,
const char* split_name) {
return GetClassInternal(env, class_name, split_name);
}
ScopedJavaLocalRef<jclass> GetClass(JNIEnv* env, const char* class_name) {
return GetClassInternal(env, class_name, "");
}
// This code is duplicated in the LazyGetClass overload below because both are
// performance-sensitive.
jclass LazyGetClass(JNIEnv* env,
const char* class_name,
const char* split_name,
std::atomic<jclass>* atomic_class_id) {
const jclass value = atomic_class_id->load(std::memory_order_acquire);
if (value) {
return value;
}
ScopedJavaGlobalRef<jclass> clazz;
clazz.Reset(GetClass(env, class_name, split_name));
jclass cas_result = nullptr;
if (atomic_class_id->compare_exchange_strong(cas_result, clazz.obj(),
std::memory_order_acq_rel)) {
// We intentionally leak the global ref since we are now storing it as a raw
// pointer in |atomic_class_id|.
return clazz.Release();
} else {
return cas_result;
}
}
// This code is duplicated in the LazyGetClass overload above because both are
// performance-sensitive.
jclass LazyGetClass(JNIEnv* env,
const char* class_name,
std::atomic<jclass>* atomic_class_id) {
const jclass value = atomic_class_id->load(std::memory_order_acquire);
if (value) {
return value;
}
ScopedJavaGlobalRef<jclass> clazz;
clazz.Reset(GetClass(env, class_name));
jclass cas_result = nullptr;
if (atomic_class_id->compare_exchange_strong(cas_result, clazz.obj(),
std::memory_order_acq_rel)) {
// We intentionally leak the global ref since we are now storing it as a raw
// pointer in |atomic_class_id|.
return clazz.Release();
} else {
return cas_result;
}
}
template <MethodID::Type type>
jmethodID MethodID::Get(JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature) {
auto get_method_ptr = type == MethodID::TYPE_STATIC
? &JNIEnv::GetStaticMethodID
: &JNIEnv::GetMethodID;
jmethodID id = (env->*get_method_ptr)(clazz, method_name, jni_signature);
if (ClearException(env) || !id) {
JNI_ZERO_FLOG("Failed to find class %smethod %s %s",
(type == TYPE_STATIC ? "static " : ""), method_name,
jni_signature);
}
return id;
}
// If |atomic_method_id| is set, this returns immediately. Otherwise, it calls
// into ::Get() above. A race is harmless since both callers compute the same
// value (and the duplicated effort happens at most once).
template <MethodID::Type type>
jmethodID MethodID::LazyGet(JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id) {
const jmethodID value = atomic_method_id->load(std::memory_order_acquire);
if (value) {
return value;
}
jmethodID id = MethodID::Get<type>(env, clazz, method_name, jni_signature);
atomic_method_id->store(id, std::memory_order_release);
return id;
}
// Various template instantiations.
template jmethodID MethodID::Get<MethodID::TYPE_STATIC>(
JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature);
template jmethodID MethodID::Get<MethodID::TYPE_INSTANCE>(
JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature);
template jmethodID MethodID::LazyGet<MethodID::TYPE_STATIC>(
JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id);
template jmethodID MethodID::LazyGet<MethodID::TYPE_INSTANCE>(
JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id);
} // namespace jni_zero

View file

@@ -0,0 +1,125 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef JNI_ZERO_CORE_H_
#define JNI_ZERO_CORE_H_
#include <jni.h>
#include <atomic>
#include <string>
#include "third_party/jni_zero/jni_export.h"
#include "third_party/jni_zero/scoped_java_ref.h"
namespace jni_zero {
// Attaches the current thread to the VM (if necessary) and returns the JNIEnv*.
JNI_ZERO_COMPONENT_BUILD_EXPORT JNIEnv* AttachCurrentThread();
// Same as AttachCurrentThread(), except that the thread name will be set to
// |thread_name| if this is the first call; otherwise |thread_name| is ignored.
// AttachCurrentThread() does not preserve the underlying platform thread name
// and simply resets it to "Thread-???". Call this function right after creating
// a new thread if keeping the thread name is important.
JNI_ZERO_COMPONENT_BUILD_EXPORT JNIEnv* AttachCurrentThreadWithName(
const std::string& thread_name);
// Detaches the current thread from VM if it is attached.
JNI_ZERO_COMPONENT_BUILD_EXPORT void DetachFromVM();
// Initializes the global JVM.
JNI_ZERO_COMPONENT_BUILD_EXPORT void InitVM(JavaVM* vm);
// Returns true if the global JVM has been initialized.
JNI_ZERO_COMPONENT_BUILD_EXPORT bool IsVMInitialized();
// Returns the global JVM, or nullptr if it has not been initialized.
JNI_ZERO_COMPONENT_BUILD_EXPORT JavaVM* GetVM();
// Do not allow any future native->java calls.
// This is necessary in gtest DEATH_TESTS to prevent
// GetJavaStackTraceIfPresent() from accessing a defunct JVM (due to fork()).
// https://crbug.com/1484834
JNI_ZERO_COMPONENT_BUILD_EXPORT void DisableJvmForTesting();
JNI_ZERO_COMPONENT_BUILD_EXPORT void SetExceptionHandler(
void (*callback)(JNIEnv*));
// Returns true if an exception is pending in the provided JNIEnv*.
JNI_ZERO_COMPONENT_BUILD_EXPORT bool HasException(JNIEnv* env);
// If an exception is pending in the provided JNIEnv*, this function clears it
// and returns true.
JNI_ZERO_COMPONENT_BUILD_EXPORT bool ClearException(JNIEnv* env);
// If there's any pending exception, this function will call the set exception
// handler, or if none are set, it will fatally LOG.
JNI_ZERO_COMPONENT_BUILD_EXPORT void CheckException(JNIEnv* env);
// Sets a function to call instead of just using JNIEnv.FindClass. Useful for
// Chrome's "splits", which need to be resolved in special ClassLoaders. The
// class name parameter (first string) will be given in package.dot.Format. The
// second parameter is the split name, which will just be an empty string if not
// used.
JNI_ZERO_COMPONENT_BUILD_EXPORT void SetClassResolver(
jclass (*resolver)(JNIEnv*, const char*, const char*));
// Finds the class named |class_name| and returns it.
// Use this method instead of invoking the JNI FindClass method directly (to
// prevent leaking local references).
// This method triggers a fatal assertion if the class could not be found.
// Use HasClass if you need to check whether the class exists.
JNI_ZERO_COMPONENT_BUILD_EXPORT ScopedJavaLocalRef<jclass>
GetClass(JNIEnv* env, const char* class_name, const char* split_name);
JNI_ZERO_COMPONENT_BUILD_EXPORT ScopedJavaLocalRef<jclass> GetClass(
JNIEnv* env,
const char* class_name);
// This method initializes |atomic_class_id| to contain a global ref to the
// class, and returns that ref on subsequent calls. It's the caller's
// responsibility to release the ref when it is no longer needed.
// The caller is responsible for zero-initializing |atomic_class_id|.
// It's fine to call this simultaneously from multiple threads referencing the
// same |atomic_class_id|.
JNI_ZERO_COMPONENT_BUILD_EXPORT jclass
LazyGetClass(JNIEnv* env,
const char* class_name,
const char* split_name,
std::atomic<jclass>* atomic_class_id);
JNI_ZERO_COMPONENT_BUILD_EXPORT jclass
LazyGetClass(JNIEnv* env,
const char* class_name,
std::atomic<jclass>* atomic_class_id);
// This class is a wrapper for JNIEnv Get(Static)MethodID.
class JNI_ZERO_COMPONENT_BUILD_EXPORT MethodID {
public:
enum Type {
TYPE_STATIC,
TYPE_INSTANCE,
};
// Returns the method ID for the method with the specified name and signature.
// This method triggers a fatal assertion if the method could not be found.
template <Type type>
static jmethodID Get(JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature);
  // The caller is responsible for zero-initializing |atomic_method_id|.
  // It's fine to call this simultaneously from multiple threads referencing the
  // same |atomic_method_id|.
template <Type type>
static jmethodID LazyGet(JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id);
};
} // namespace jni_zero
#endif // JNI_ZERO_CORE_H_
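A hedged sketch of how an embedder might wire up the declarations above. The JNI_OnLoad hook is the standard JNI entry point from jni.h; DoWorkOnNativeThread() and the choice of java/lang/Runtime are hypothetical illustration, not part of jni_zero.

#include <jni.h>

#include "third_party/jni_zero/core.h"

// Record the JavaVM as soon as the library is loaded so that native threads
// can attach on demand later.
extern "C" JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* /*reserved*/) {
  jni_zero::InitVM(vm);
  return JNI_VERSION_1_6;
}

// Hypothetical worker entry point running on a thread created in C++.
void DoWorkOnNativeThread() {
  // Attaches the calling thread (if needed) and returns its JNIEnv*.
  JNIEnv* env = jni_zero::AttachCurrentThread();

  // Resolve a class; GetClass() fatally asserts if the class is missing.
  jni_zero::ScopedJavaLocalRef<jclass> clazz =
      jni_zero::GetClass(env, "java/lang/Runtime");

  // ... look up methods with MethodID::LazyGet() and call them ...

  // Optional: detach before the thread exits.
  jni_zero::DetachFromVM();
}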

View file

@@ -0,0 +1,49 @@
// This file was generated by
// //third_party/jni_zero/jni_zero.py
// For
// org.jni_zero.JniInit
#ifndef org_jni_1zero_JniInit_JNI
#define org_jni_1zero_JniInit_JNI
#include <jni.h>
#include "third_party/jni_zero/jni_export.h"
#include "../../../../../../../third_party/jni_zero/jni_zero_internal.h"
// Class Accessors
#ifndef org_jni_1zero_JniInit_clazz_defined
#define org_jni_1zero_JniInit_clazz_defined
inline jclass org_jni_1zero_JniInit_clazz(JNIEnv* env) {
static const char kClassName[] = "org/jni_zero/JniInit";
static std::atomic<jclass> cached_class;
return jni_zero::internal::LazyGetClass(env, kClassName, &cached_class);
}
#endif
namespace jni_zero {
// Native to Java functions
static void Java_JniInit_crashIfMultiplexingMisaligned(
JNIEnv* env,
jlong wholeHash,
jlong priorityHash) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = org_jni_1zero_JniInit_clazz(env);
CHECK_CLAZZ(env, clazz, clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"crashIfMultiplexingMisaligned",
"(JJ)V",
&cached_method_id);
env->CallStaticVoidMethod(clazz, call_context.method_id(), wholeHash, priorityHash);
}
} // namespace jni_zero
#endif // org_jni_1zero_JniInit_JNI

View file

@@ -0,0 +1,63 @@
// This file was generated by
// //third_party/jni_zero/jni_zero.py
// For
// org.jni_zero.JniUtil
#ifndef org_jni_1zero_JniUtil_JNI
#define org_jni_1zero_JniUtil_JNI
#include <jni.h>
#include "third_party/jni_zero/jni_export.h"
#include "../../../../../../../third_party/jni_zero/jni_zero_internal.h"
// Class Accessors
#ifndef org_jni_1zero_JniUtil_clazz_defined
#define org_jni_1zero_JniUtil_clazz_defined
inline jclass org_jni_1zero_JniUtil_clazz(JNIEnv* env) {
static const char kClassName[] = "org/jni_zero/JniUtil";
static std::atomic<jclass> cached_class;
return jni_zero::internal::LazyGetClass(env, kClassName, &cached_class);
}
#endif
// Native to Java functions
static jni_zero::ScopedJavaLocalRef<jobject> Java_JniUtil_arrayToMap(
JNIEnv* env,
const jni_zero::JavaRef<jobjectArray>& array) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = org_jni_1zero_JniUtil_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"arrayToMap",
"([Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), array.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
static jni_zero::ScopedJavaLocalRef<jobjectArray> Java_JniUtil_mapToArray(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& map) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = org_jni_1zero_JniUtil_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"mapToArray",
"(Ljava/util/Map;)[Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), map.obj());
jobjectArray _ret2 = static_cast<jobjectArray>(_ret);
return jni_zero::ScopedJavaLocalRef<jobjectArray>(env, _ret2);
}
#endif // org_jni_1zero_JniUtil_JNI

View file

@@ -0,0 +1,17 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/** Applied to fields that are accessed from native via JNI. Causes R8 to not rename them. */
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.CLASS)
public @interface AccessedByNative {
public String value() default "";
}

View file

@@ -0,0 +1,28 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Used by the JNI generator to create the necessary JNI bindings and expose this method to native
* code.
*
* <p>Any uncaught Java exceptions will crash the current process. This is generally the desired
* behavior, since most exceptions indicate an unexpected error. If your java method expects an
* exception, we recommend refactoring to catch exceptions and indicate errors with special return
* values instead. If this is not possible, see {@link CalledByNativeUnchecked} instead.
*/
@Target({ElementType.CONSTRUCTOR, ElementType.METHOD})
@Retention(RetentionPolicy.CLASS)
public @interface CalledByNative {
/*
* If present, tells which inner class the method belongs to.
*/
public String value() default "";
}

View file

@@ -0,0 +1,26 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Used by the JNI generator to create the necessary JNI bindings and expose this method to native
* test-only code.
*
* <p>Any method annotated by this will be kept around for tests only. If you wish to call your
* method from non-test code, see {@link CalledByNative} instead.
*/
@Target({ElementType.CONSTRUCTOR, ElementType.METHOD})
@Retention(RetentionPolicy.CLASS)
public @interface CalledByNativeForTesting {
/*
* If present, tells which inner class the method belongs to.
*/
public String value() default "";
}

View file

@@ -0,0 +1,29 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Similar to {@link CalledByNative}, this also exposes JNI bindings to native code. The main
* difference is this <b>will not</b> crash the browser process if the Java method throws an
* exception. However, the C++ caller <b>must</b> handle and clear the exception before calling into
* any other Java code, otherwise the next Java method call will crash (with the previous call's
* exception, which leads to a very confusing debugging experience).
*
* <p>Usage of this annotation should be very rare; due to the complexity of correctly handling
* exceptions in C++, prefer using {@link CalledByNative}.
*/
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.CLASS)
public @interface CalledByNativeUnchecked {
/*
* If present, tells which inner class the method belongs to.
*/
public String value() default "";
}

View file

@@ -0,0 +1,25 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Causes build to assert that annotated classes / methods / fields are optimized away in release
* builds (when using checkdiscard_proguard.flags).
*/
@Target({ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.CLASS)
public @interface CheckDiscard {
/**
* Describes why the element should be discarded.
*
* @return reason for discarding (crbug links are preferred unless reason is trivial).
*/
String value();
}

View file

@@ -0,0 +1,20 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * @JNINamespace is used by the JNI generator to create the necessary JNI bindings and expose the
 * annotated type's methods to native code using the specified namespace.
*/
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
public @interface JNINamespace {
public String value();
}

View file

@@ -0,0 +1,15 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
/**
* Implemented by the TEST_HOOKS field in JNI wrapper classes that are generated
* by the JNI annotation processor. Used in tests for setting the mock
 * implementation of a {@link org.jni_zero.NativeMethods} interface.
 * @param <T> The interface annotated with {@link org.jni_zero.NativeMethods}
*/
public interface JniStaticTestMocker<T> {
void setInstanceForTesting(T instance);
}

View file

@@ -0,0 +1,25 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* @NativeClassQualifiedName is used by the JNI generator to create the necessary JNI bindings to
* call into the specified native class name.
*/
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface NativeClassQualifiedName {
/*
* Tells which native class the method is going to be bound to.
* The first parameter of the annotated method must be an int nativePtr pointing to
* an instance of this class.
*/
public String value();
}

View file

@@ -0,0 +1,44 @@
// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import org.chromium.build.BuildConfig;
/**
* Exposes native library loading status.
*/
public class NativeLibraryLoadedStatus {
/**
* Interface for querying native method availability.
*/
public interface NativeLibraryLoadedStatusProvider {
boolean areNativeMethodsReady();
}
private static NativeLibraryLoadedStatusProvider sProvider;
public static class NativeNotLoadedException extends RuntimeException {
public NativeNotLoadedException(String s) {
super(s);
}
}
public static void checkLoaded() {
if (sProvider == null) return;
if (!sProvider.areNativeMethodsReady()) {
throw new NativeNotLoadedException(
"Native method called before the native library was ready.");
}
}
public static void setProvider(NativeLibraryLoadedStatusProvider statusProvider) {
sProvider = statusProvider;
}
public static NativeLibraryLoadedStatusProvider getProviderForTesting() {
return sProvider;
}
}

View file

@@ -0,0 +1,20 @@
// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.jni_zero;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.SOURCE)
public @interface NativeMethods {
/**
* Tells the build system to call a different GEN_JNI, prefixed by the value we put here. This
* should only be used for feature modules where we need a different GEN_JNI. For example, if
* you did @NativeMethods("dfmname"), this would call into dfmname_GEN_JNI.java.
*/
public String value() default "";
}

View file

@@ -0,0 +1,24 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef JNI_ZERO_JNI_EXPORT_H_
#define JNI_ZERO_JNI_EXPORT_H_
#if defined(__i386__)
// Dalvik JIT generated code doesn't guarantee 16-byte stack alignment on
// x86 - use force_align_arg_pointer to realign the stack at the JNI
// boundary. crbug.com/655248
#define JNI_BOUNDARY_EXPORT \
extern "C" __attribute__((visibility("default"), force_align_arg_pointer))
#else
#define JNI_BOUNDARY_EXPORT extern "C" __attribute__((visibility("default")))
#endif
#if defined(COMPONENT_BUILD)
#define JNI_ZERO_COMPONENT_BUILD_EXPORT __attribute__((visibility("default")))
#else
#define JNI_ZERO_COMPONENT_BUILD_EXPORT
#endif
#endif // JNI_ZERO_JNI_EXPORT_H_
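For illustration, a hedged sketch of how the boundary macro above is typically applied to a JNI entry point. The Java class org.example.Foo and its nativeInit method are hypothetical names used only for this example.

#include <jni.h>

#include "third_party/jni_zero/jni_export.h"

// Exported with default visibility (and, on x86, a realigned stack) so the
// runtime can resolve it by its mangled JNI name.
JNI_BOUNDARY_EXPORT void Java_org_example_Foo_nativeInit(JNIEnv* /*env*/,
                                                         jclass /*clazz*/) {
  // ... initialization work ...
}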

View file

@@ -0,0 +1,58 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef JNI_ZERO_JNI_INT_WRAPPER_H_
#define JNI_ZERO_JNI_INT_WRAPPER_H_
// Wrapper used to receive int when calling Java from native.
// The wrapper disallows automatic conversion of long to int.
// This is to avoid a common anti-pattern where a Java int is used
// to receive a native pointer. Please use a Java long to receive
// native pointers, so that the code works on both 32-bit and 64-bit
// platforms. Note the wrapper allows other lossy conversions into
// jint that could be considered anti-patterns, such as from size_t.
// Checking is only done in debugging builds.
#ifdef NDEBUG
typedef jint JniIntWrapper;
// This inline is sufficiently trivial that it does not change the
// final code generated by g++.
inline jint as_jint(JniIntWrapper wrapper) {
return wrapper;
}
#else
class JniIntWrapper {
public:
JniIntWrapper() : i_(0) {}
JniIntWrapper(int i) : i_(i) {}
JniIntWrapper(const JniIntWrapper& ji) : i_(ji.i_) {}
template <class T>
JniIntWrapper(const T& t) : i_(t) {}
jint as_jint() const { return i_; }
private:
// If you get an "is private" error at the line below it is because you used
// an implicit conversion to convert a long to an int when calling Java.
// We disallow this, as a common anti-pattern allows converting a native
// pointer (intptr_t) to a Java int. Please use a Java long to represent
// a native pointer. If you want a lossy conversion, please use an
// explicit conversion in your C++ code. Note an error is only seen when
// compiling on a 64-bit platform, as intptr_t is indistinguishable from
// int on 32-bit platforms.
JniIntWrapper(long);
jint i_;
};
inline jint as_jint(const JniIntWrapper& wrapper) {
return wrapper.as_jint();
}
#endif // NDEBUG
#endif // JNI_ZERO_JNI_INT_WRAPPER_H_
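A hedged illustration of the conversion rules described above. CallJavaIntMethod() and Example() are hypothetical helpers standing in for what generated bindings do; they are not part of jni_zero.

#include <jni.h>

#include <cstdint>

#include "third_party/jni_zero/jni_int_wrapper.h"

// Accepting JniIntWrapper (rather than jint) is what blocks callers from
// silently passing a 64-bit value where Java expects an int.
void CallJavaIntMethod(JNIEnv* env, jobject obj, jmethodID mid,
                       JniIntWrapper value) {
  env->CallVoidMethod(obj, mid, as_jint(value));
}

void Example(JNIEnv* env, jobject obj, jmethodID mid,
             std::intptr_t native_ptr) {
  CallJavaIntMethod(env, obj, mid, 42);  // OK: plain int.
  // CallJavaIntMethod(env, obj, mid, native_ptr);
  //   ^ In debug builds on 64-bit targets this fails to compile, because the
  //     long -> JniIntWrapper constructor is private.
  CallJavaIntMethod(env, obj, mid,
                    static_cast<jint>(native_ptr));  // Explicit (lossy) cast.
}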

View file

@@ -0,0 +1,117 @@
// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef JNI_ZERO_JNI_ZERO_HELPER_H_
#define JNI_ZERO_JNI_ZERO_HELPER_H_
#include <jni.h>
#include "third_party/jni_zero/core.h"
#include "third_party/jni_zero/jni_export.h"
#include "third_party/jni_zero/jni_int_wrapper.h"
#include "third_party/jni_zero/logging.h"
// Project-specific macros used by the header files generated by
// jni_zero.py. Different projects can then specify their own
// implementation for this file.
#define CHECK_NATIVE_PTR(env, jcaller, native_ptr, method_name, ...) \
JNI_ZERO_DCHECK(native_ptr);
#define CHECK_CLAZZ(env, jcaller, clazz, ...) JNI_ZERO_DCHECK(clazz);
#if defined(__clang__) && __has_attribute(noinline)
#define JNI_ZERO_NOINLINE [[clang::noinline]]
#elif __has_attribute(noinline)
#define JNI_ZERO_NOINLINE __attribute__((noinline))
#endif
#if defined(__clang__) && defined(NDEBUG) && __has_attribute(always_inline)
#define JNI_ZERO_ALWAYS_INLINE [[clang::always_inline]] inline
#elif defined(NDEBUG) && __has_attribute(always_inline)
#define JNI_ZERO_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define JNI_ZERO_ALWAYS_INLINE inline
#endif
namespace jni_zero {
inline void HandleRegistrationError(JNIEnv* env,
jclass clazz,
const char* filename) {
JNI_ZERO_ELOG("RegisterNatives failed in %s", filename);
}
// A 32-bit number could plausibly be an address on the stack. A random 64-bit
// marker is much less likely to already be present on the stack.
constexpr uint64_t kJniStackMarkerValue = 0xbdbdef1bebcade1b;
// Context about the JNI call, with the exception left unchecked, to be stored
// on the stack.
struct JNI_ZERO_COMPONENT_BUILD_EXPORT JniJavaCallContextUnchecked {
JNI_ZERO_ALWAYS_INLINE JniJavaCallContextUnchecked() {
// TODO(ssid): Implement for other architectures.
#if defined(__arm__) || defined(__aarch64__)
// This assumes that this method does not increment the stack pointer.
asm volatile("mov %0, sp" : "=r"(sp));
#else
sp = 0;
#endif
}
// Force no inline to reduce code size.
template <MethodID::Type type>
JNI_ZERO_NOINLINE void Init(JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id) {
env1 = env;
// Make sure compiler doesn't optimize out the assignment.
memcpy(&marker, &kJniStackMarkerValue, sizeof(kJniStackMarkerValue));
// Gets PC of the calling function.
pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
method_id = MethodID::LazyGet<type>(env, clazz, method_name, jni_signature,
atomic_method_id);
}
JNI_ZERO_NOINLINE ~JniJavaCallContextUnchecked() {
// Reset so that spurious marker finds are avoided.
memset(&marker, 0, sizeof(marker));
}
uint64_t marker;
uintptr_t sp;
uintptr_t pc;
JNIEnv* env1;
jmethodID method_id;
};
// Context about the JNI call, with the exception checked in the destructor, to
// be stored on the stack.
struct JNI_ZERO_COMPONENT_BUILD_EXPORT JniJavaCallContextChecked {
// Force no inline to reduce code size.
template <MethodID::Type type>
JNI_ZERO_NOINLINE void Init(JNIEnv* env,
jclass clazz,
const char* method_name,
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id) {
base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id);
// Reset |pc| to correct caller.
base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
}
JNI_ZERO_NOINLINE ~JniJavaCallContextChecked() { CheckException(base.env1); }
JniJavaCallContextUnchecked base;
};
static_assert(sizeof(JniJavaCallContextChecked) ==
sizeof(JniJavaCallContextUnchecked),
"Stack unwinder cannot work with structs of different sizes.");
} // namespace jni_zero
#endif // JNI_ZERO_JNI_ZERO_HELPER_H_

View file

@@ -0,0 +1,76 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/jni_zero/logging.h"
#include <stdarg.h>
#include <stdio.h>
#include <atomic>
#include <memory>
#ifndef JNI_ZERO_IS_ROBOLECTRIC
#include <android/log.h>
#endif
namespace jni_zero {
std::atomic<LogMessageCallback> g_log_callback{};
void SetLogMessageCallback(LogMessageCallback callback) {
g_log_callback.store(callback, std::memory_order_relaxed);
}
void LogMessage(LogLev level,
const char* fname,
int line,
const char* fmt,
...) {
char stack_buf[512];
std::unique_ptr<char[]> large_buf;
char* log_msg = &stack_buf[0];
// By default use a stack allocated buffer because most log messages are quite
// short. In rare cases they can be larger (e.g. --help). In those cases we
// pay the cost of allocating the buffer on the heap.
for (size_t max_len = sizeof(stack_buf);;) {
va_list args;
va_start(args, fmt);
int res = vsnprintf(log_msg, max_len, fmt, args);
va_end(args);
// If for any reason the print fails, overwrite the message but still print
// it. The code below will attach the filename and line, which is still
// useful.
if (res < 0) {
snprintf(log_msg, max_len, "%s", "[printf format error]");
break;
}
    // If res >= max_len, vsnprintf truncated the output. Retry with a larger
    // buffer in that case (within reasonable limits).
if (res < static_cast<int>(max_len) || max_len >= 128 * 1024) {
break;
}
max_len *= 4;
large_buf.reset(new char[max_len]);
log_msg = &large_buf[0];
}
LogMessageCallback cb = g_log_callback.load(std::memory_order_relaxed);
if (cb) {
cb({level, line, fname, log_msg});
return;
}
#ifdef JNI_ZERO_IS_ROBOLECTRIC
fprintf(stderr, "%s:%d %s\n", fname, line, log_msg);
#else
__android_log_print(int{ANDROID_LOG_DEBUG} + level, "jni_zero", "%s:%d %s",
fname, line, log_msg);
#endif
if (level >= kLogFatal) {
JNI_ZERO_IMMEDIATE_CRASH();
}
}
} // namespace jni_zero

View file

@@ -0,0 +1,90 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef JNI_ZERO_LOGGING_H_
#define JNI_ZERO_LOGGING_H_
#include "third_party/jni_zero/jni_export.h"
#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
#define JNI_ZERO_DCHECK_IS_ON() false
#else
#define JNI_ZERO_DCHECK_IS_ON() true
#endif
// Simplified version of Google's logging. Adapted from perfetto's
// implementation.
namespace jni_zero {
// Constexpr functions to extract basename(__FILE__), e.g.: ../foo/f.c -> f.c .
constexpr const char* StrEnd(const char* s) {
return *s ? StrEnd(s + 1) : s;
}
constexpr const char* BasenameRecursive(const char* s,
const char* begin,
const char* end) {
return (*s == '/' && s < end)
? (s + 1)
: ((s > begin) ? BasenameRecursive(s - 1, begin, end) : s);
}
constexpr const char* Basename(const char* str) {
return BasenameRecursive(StrEnd(str), str, StrEnd(str));
}
enum LogLev { kLogInfo = 0, kLogError, kLogFatal };
struct LogMessageCallbackArgs {
LogLev level;
int line;
const char* filename;
const char* message;
};
using LogMessageCallback = void (*)(LogMessageCallbackArgs);
// This is not thread-safe and must be called before logging from other
// threads.
JNI_ZERO_COMPONENT_BUILD_EXPORT void SetLogMessageCallback(
LogMessageCallback callback);
JNI_ZERO_COMPONENT_BUILD_EXPORT void LogMessage(LogLev,
const char* fname,
int line,
const char* fmt,
...)
__attribute__((__format__(__printf__, 4, 5)));
#define JNI_ZERO_IMMEDIATE_CRASH() \
do { \
__builtin_trap(); \
__builtin_unreachable(); \
} while (0)
#define JNI_ZERO_XLOG(level, fmt, ...) \
::jni_zero::LogMessage(level, ::jni_zero::Basename(__FILE__), __LINE__, fmt, \
##__VA_ARGS__)
#define JNI_ZERO_ILOG(fmt, ...) \
JNI_ZERO_XLOG(::jni_zero::kLogInfo, fmt, ##__VA_ARGS__)
#define JNI_ZERO_ELOG(fmt, ...) \
JNI_ZERO_XLOG(::jni_zero::kLogError, fmt, ##__VA_ARGS__)
#define JNI_ZERO_FLOG(fmt, ...) \
JNI_ZERO_XLOG(::jni_zero::kLogFatal, fmt, ##__VA_ARGS__)
#define JNI_ZERO_CHECK(x) \
do { \
if (__builtin_expect(!(x), 0)) { \
JNI_ZERO_FLOG("%s", "JNI_ZERO_CHECK(" #x ")"); \
} \
} while (0)
#if JNI_ZERO_DCHECK_IS_ON()
#define JNI_ZERO_DCHECK(x) JNI_ZERO_CHECK(x)
#else
#define JNI_ZERO_DCHECK(x) \
do { \
} while (false && (x))
#endif
} // namespace jni_zero
#endif // JNI_ZERO_LOGGING_H_
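A hedged usage sketch for the macros and callback hook declared above. MyLogSink() and InitJniZeroLogging() are hypothetical embedder functions, not part of jni_zero.

#include <cstdio>

#include "third_party/jni_zero/logging.h"

// Forward jni_zero log messages into the embedder's own sink instead of
// logcat / stderr.
void MyLogSink(jni_zero::LogMessageCallbackArgs args) {
  std::fprintf(stderr, "[jni_zero:%d] %s:%d %s\n",
               static_cast<int>(args.level), args.filename, args.line,
               args.message);
}

void InitJniZeroLogging() {
  // Must happen before other threads start logging (see the comment above).
  jni_zero::SetLogMessageCallback(&MyLogSink);

  JNI_ZERO_ILOG("logging initialized (dchecks on: %d)",
                static_cast<int>(JNI_ZERO_DCHECK_IS_ON()));
  JNI_ZERO_CHECK(1 + 1 == 2);  // A failing check would log fatally and crash.
}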

View file

@@ -0,0 +1,14 @@
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Keep rules for field- and method-level annotations.
-keepclasseswithmembers,allowaccessmodification class ** {
@org.jni_zero.AccessedByNative <fields>;
}
-keepclasseswithmembers,includedescriptorclasses,allowaccessmodification class ** {
@org.jni_zero.CalledByNative <methods>;
}
-keepclasseswithmembers,includedescriptorclasses,allowaccessmodification class ** {
@org.jni_zero.CalledByNativeUnchecked <methods>;
}

View file

@@ -0,0 +1,7 @@
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-keepclasseswithmembers,includedescriptorclasses,allowaccessmodification class ** {
@org.jni_zero.CalledByNativeForTesting <methods>;
}

View file

@@ -0,0 +1,93 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/jni_zero/scoped_java_ref.h"
#include "third_party/jni_zero/core.h"
namespace jni_zero {
namespace {
const int kDefaultLocalFrameCapacity = 16;
} // namespace
ScopedJavaLocalFrame::ScopedJavaLocalFrame(JNIEnv* env) : env_(env) {
int failed = env_->PushLocalFrame(kDefaultLocalFrameCapacity);
JNI_ZERO_DCHECK(!failed);
}
ScopedJavaLocalFrame::ScopedJavaLocalFrame(JNIEnv* env, int capacity)
: env_(env) {
int failed = env_->PushLocalFrame(capacity);
JNI_ZERO_DCHECK(!failed);
}
ScopedJavaLocalFrame::~ScopedJavaLocalFrame() {
env_->PopLocalFrame(nullptr);
}
#if JNI_ZERO_DCHECK_IS_ON()
// This constructor is inlined when DCHECKs are disabled; don't add anything
// else here.
JavaRef<jobject>::JavaRef(JNIEnv* env, jobject obj) : obj_(obj) {
if (obj) {
JNI_ZERO_DCHECK(env && env->GetObjectRefType(obj) == JNILocalRefType);
}
}
#endif
JNIEnv* JavaRef<jobject>::SetNewLocalRef(JNIEnv* env, jobject obj) {
if (!env) {
env = AttachCurrentThread();
} else {
JNI_ZERO_DCHECK(env == AttachCurrentThread()); // Is |env| on correct thread.
}
if (obj) {
obj = env->NewLocalRef(obj);
}
if (obj_) {
env->DeleteLocalRef(obj_);
}
obj_ = obj;
return env;
}
void JavaRef<jobject>::SetNewGlobalRef(JNIEnv* env, jobject obj) {
if (!env) {
env = AttachCurrentThread();
} else {
JNI_ZERO_DCHECK(env == AttachCurrentThread()); // Is |env| on correct thread.
}
if (obj) {
obj = env->NewGlobalRef(obj);
}
if (obj_) {
env->DeleteGlobalRef(obj_);
}
obj_ = obj;
}
void JavaRef<jobject>::ResetLocalRef(JNIEnv* env) {
if (obj_) {
JNI_ZERO_DCHECK(env == AttachCurrentThread()); // Is |env| on correct thread.
env->DeleteLocalRef(obj_);
obj_ = nullptr;
}
}
void JavaRef<jobject>::ResetGlobalRef() {
if (obj_) {
AttachCurrentThread()->DeleteGlobalRef(obj_);
obj_ = nullptr;
}
}
jobject JavaRef<jobject>::ReleaseInternal() {
jobject obj = obj_;
obj_ = nullptr;
return obj;
}
} // namespace jni_zero
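A hedged sketch of the intended usage of the reference wrappers implemented above (and declared in scoped_java_ref.h below): a local ref for scoped work, promoted to a global ref when it must outlive the call stack. CachedStringClass() is a hypothetical helper, not part of jni_zero.

#include "third_party/jni_zero/core.h"
#include "third_party/jni_zero/scoped_java_ref.h"

// Lazily caches java.lang.String as a global ref so it can be reused after the
// first lookup. Note: real code would guard this lazy population against races.
jni_zero::ScopedJavaGlobalRef<jclass>& CachedStringClass() {
  static jni_zero::ScopedJavaGlobalRef<jclass> cached;
  if (!cached) {
    JNIEnv* env = jni_zero::AttachCurrentThread();
    // Local ref: valid only on this thread and released when |local| goes out
    // of scope.
    jni_zero::ScopedJavaLocalRef<jclass> local =
        jni_zero::GetClass(env, "java/lang/String");
    // Promote to a global ref; |cached| now owns an independent reference.
    cached.Reset(local);
  }
  return cached;
}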

View file

@@ -0,0 +1,534 @@
// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef JNI_ZERO_SCOPED_JAVA_REF_H_
#define JNI_ZERO_SCOPED_JAVA_REF_H_
#include <jni.h>
#include <stddef.h>
#include <type_traits>
#include <utility>
#include "third_party/jni_zero/jni_export.h"
#include "third_party/jni_zero/logging.h"
namespace jni_zero {
// Creates a new local reference frame, in which at least a given number of
// local references can be created. Note that local references already created
// in previous local frames are still valid in the current local frame.
class JNI_ZERO_COMPONENT_BUILD_EXPORT ScopedJavaLocalFrame {
public:
explicit ScopedJavaLocalFrame(JNIEnv* env);
ScopedJavaLocalFrame(JNIEnv* env, int capacity);
ScopedJavaLocalFrame(const ScopedJavaLocalFrame&) = delete;
ScopedJavaLocalFrame& operator=(const ScopedJavaLocalFrame&) = delete;
~ScopedJavaLocalFrame();
private:
// This class is only good for use on the thread it was created on so
// it's safe to cache the non-threadsafe JNIEnv* inside this object.
JNIEnv* env_;
};
// Forward declare the generic java reference template class.
template <typename T>
class JavaRef;
// Template specialization of JavaRef, which acts as the base class for all
// other JavaRef<> template types. This allows you to e.g. pass
// ScopedJavaLocalRef<jstring> into a function taking const JavaRef<jobject>&
template <>
class JNI_ZERO_COMPONENT_BUILD_EXPORT JavaRef<jobject> {
public:
// Initializes a null reference.
constexpr JavaRef() {}
// Allow nullptr to be converted to JavaRef. This avoids having to declare an
// empty JavaRef just to pass null to a function, and makes C++ "nullptr" and
// Java "null" equivalent.
constexpr JavaRef(std::nullptr_t) {}
JavaRef(const JavaRef&) = delete;
JavaRef& operator=(const JavaRef&) = delete;
// Public to allow destruction of null JavaRef objects.
~JavaRef() {}
// TODO(torne): maybe rename this to get() for consistency with unique_ptr
// once there's fewer unnecessary uses of it in the codebase.
jobject obj() const { return obj_; }
explicit operator bool() const { return obj_ != nullptr; }
// Deprecated. Just use bool conversion.
// TODO(torne): replace usage and remove this.
bool is_null() const { return obj_ == nullptr; }
protected:
// Takes ownership of the |obj| reference passed; requires it to be a local
// reference type.
#if JNI_ZERO_DCHECK_IS_ON()
// Implementation contains a DCHECK; implement out-of-line when DCHECK_IS_ON.
JavaRef(JNIEnv* env, jobject obj);
#else
JavaRef(JNIEnv* env, jobject obj) : obj_(obj) {}
#endif
// Used for move semantics. obj_ must have been released first if non-null.
void steal(JavaRef&& other) {
obj_ = other.obj_;
other.obj_ = nullptr;
}
// The following are implementation detail convenience methods, for
// use by the sub-classes.
JNIEnv* SetNewLocalRef(JNIEnv* env, jobject obj);
void SetNewGlobalRef(JNIEnv* env, jobject obj);
void ResetLocalRef(JNIEnv* env);
void ResetGlobalRef();
jobject ReleaseInternal();
private:
jobject obj_ = nullptr;
};
// Forward declare the object array reader for the convenience function.
template <typename T>
class JavaObjectArrayReader;
// Generic base class for ScopedJavaLocalRef and ScopedJavaGlobalRef. Useful
// for allowing functions to accept a reference without having to mandate
// whether it is a local or global type.
template <typename T>
class JavaRef : public JavaRef<jobject> {
public:
constexpr JavaRef() {}
constexpr JavaRef(std::nullptr_t) {}
JavaRef(const JavaRef&) = delete;
JavaRef& operator=(const JavaRef&) = delete;
~JavaRef() {}
T obj() const { return static_cast<T>(JavaRef<jobject>::obj()); }
// Get a JavaObjectArrayReader for the array pointed to by this reference.
// Only defined for JavaRef<jobjectArray>.
// You must pass the type of the array elements (usually jobject) as the
// template parameter.
template <typename ElementType,
typename T_ = T,
typename = std::enable_if_t<std::is_same_v<T_, jobjectArray>>>
JavaObjectArrayReader<ElementType> ReadElements() const {
return JavaObjectArrayReader<ElementType>(*this);
}
protected:
JavaRef(JNIEnv* env, T obj) : JavaRef<jobject>(env, obj) {}
};
// Holds a local reference to a JNI method parameter.
// Method parameters should not be deleted, and so this class exists purely to
// wrap them as a JavaRef<T> in the JNI binding generator. Do not create
// instances manually.
template <typename T>
class JavaParamRef : public JavaRef<T> {
public:
// Assumes that |obj| is a parameter passed to a JNI method from Java.
// Does not assume ownership as parameters should not be deleted.
JavaParamRef(JNIEnv* env, T obj) : JavaRef<T>(env, obj) {}
// Allow nullptr to be converted to JavaParamRef. Some unit tests call JNI
// methods directly from C++ and pass null for objects which are not actually
// used by the implementation (e.g. the caller object); allow this to keep
// working.
JavaParamRef(std::nullptr_t) {}
JavaParamRef(const JavaParamRef&) = delete;
JavaParamRef& operator=(const JavaParamRef&) = delete;
~JavaParamRef() {}
// TODO(torne): remove this cast once we're using JavaRef consistently.
// http://crbug.com/506850
operator T() const { return JavaRef<T>::obj(); }
};
// Holds a local reference to a Java object. The local reference is scoped
// to the lifetime of this object.
// Instances of this class may hold onto any JNIEnv passed into it until
// destroyed. Therefore, since a JNIEnv is only suitable for use on a single
// thread, objects of this class must be created, used, and destroyed, on a
// single thread.
// Therefore, this class should only be used as a stack-based object and from a
// single thread. If you wish to have the reference outlive the current
// callstack (e.g. as a class member) or you wish to pass it across threads,
// use a ScopedJavaGlobalRef instead.
template <typename T>
class ScopedJavaLocalRef : public JavaRef<T> {
public:
// Take ownership of a bare jobject. This does not create a new reference.
// This should only be used by JNI helper functions, or in cases where code
// must call JNIEnv methods directly.
static ScopedJavaLocalRef Adopt(JNIEnv* env, T obj) {
return ScopedJavaLocalRef(env, obj);
}
constexpr ScopedJavaLocalRef() {}
constexpr ScopedJavaLocalRef(std::nullptr_t) {}
// Copy constructor. This is required in addition to the copy conversion
// constructor below.
ScopedJavaLocalRef(const ScopedJavaLocalRef& other) : env_(other.env_) {
JavaRef<T>::SetNewLocalRef(env_, other.obj());
}
// Copy conversion constructor.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef(const ScopedJavaLocalRef<U>& other) : env_(other.env_) {
JavaRef<T>::SetNewLocalRef(env_, other.obj());
}
// Move constructor. This is required in addition to the move conversion
// constructor below.
ScopedJavaLocalRef(ScopedJavaLocalRef&& other) : env_(other.env_) {
JavaRef<T>::steal(std::move(other));
}
// Move conversion constructor.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef(ScopedJavaLocalRef<U>&& other) : env_(other.env_) {
JavaRef<T>::steal(std::move(other));
}
// Constructor for other JavaRef types.
explicit ScopedJavaLocalRef(const JavaRef<T>& other) { Reset(other); }
// Assumes that |obj| is a local reference to a Java object and takes
// ownership of this local reference.
// TODO(torne): make legitimate uses call Adopt() instead, and make this
// private.
ScopedJavaLocalRef(JNIEnv* env, T obj) : JavaRef<T>(env, obj), env_(env) {}
~ScopedJavaLocalRef() { Reset(); }
// Null assignment, for disambiguation.
ScopedJavaLocalRef& operator=(std::nullptr_t) {
Reset();
return *this;
}
// Copy assignment.
ScopedJavaLocalRef& operator=(const ScopedJavaLocalRef& other) {
Reset(other);
return *this;
}
// Copy conversion assignment.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef& operator=(const ScopedJavaLocalRef<U>& other) {
Reset(other);
return *this;
}
// Move assignment.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef& operator=(ScopedJavaLocalRef<U>&& other) {
env_ = other.env_;
Reset();
JavaRef<T>::steal(std::move(other));
return *this;
}
// Assignment for other JavaRef types.
ScopedJavaLocalRef& operator=(const JavaRef<T>& other) {
Reset(other);
return *this;
}
void Reset() { JavaRef<T>::ResetLocalRef(env_); }
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
void Reset(const ScopedJavaLocalRef<U>& other) {
// We can copy over env_ here as |other| instance must be from the same
// thread as |this| local ref. (See class comment for multi-threading
// limitations, and alternatives).
env_ = JavaRef<T>::SetNewLocalRef(other.env_, other.obj());
}
void Reset(const JavaRef<T>& other) {
// If |env_| was not yet set (is still null) it will be attached to the
// current thread in SetNewLocalRef().
env_ = JavaRef<T>::SetNewLocalRef(env_, other.obj());
}
// Releases the local reference to the caller. The caller *must* delete the
// local reference when it is done with it. Note that calling a Java method
// is *not* a transfer of ownership and Release() should not be used.
T Release() { return static_cast<T>(JavaRef<T>::ReleaseInternal()); }
private:
// This class is only good for use on the thread it was created on so
// it's safe to cache the non-threadsafe JNIEnv* inside this object.
JNIEnv* env_ = nullptr;
// Prevent ScopedJavaLocalRef(JNIEnv*, T obj) from being used to take
// ownership of a JavaParamRef's underlying object - parameters are not
// allowed to be deleted and so should not be owned by ScopedJavaLocalRef.
// TODO(torne): this can be removed once JavaParamRef no longer has an
// implicit conversion back to T.
ScopedJavaLocalRef(JNIEnv* env, const JavaParamRef<T>& other);
// Friend required to get env_ from conversions.
template <typename U>
friend class ScopedJavaLocalRef;
// Avoids JavaObjectArrayReader having to accept and store its own env.
template <typename U>
friend class JavaObjectArrayReader;
};
// Holds a global reference to a Java object. The global reference is scoped
// to the lifetime of this object. This class does not hold onto any JNIEnv*
// passed to it, hence it is safe to use across threads (within the constraints
// imposed by the underlying Java object that it references).
template <typename T>
class ScopedJavaGlobalRef : public JavaRef<T> {
public:
constexpr ScopedJavaGlobalRef() {}
constexpr ScopedJavaGlobalRef(std::nullptr_t) {}
// Copy constructor. This is required in addition to the copy conversion
// constructor below.
ScopedJavaGlobalRef(const ScopedJavaGlobalRef& other) { Reset(other); }
// Copy conversion constructor.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef(const ScopedJavaGlobalRef<U>& other) {
Reset(other);
}
// Move constructor. This is required in addition to the move conversion
// constructor below.
ScopedJavaGlobalRef(ScopedJavaGlobalRef&& other) {
JavaRef<T>::steal(std::move(other));
}
// Move conversion constructor.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef(ScopedJavaGlobalRef<U>&& other) {
JavaRef<T>::steal(std::move(other));
}
// Conversion constructor for other JavaRef types.
explicit ScopedJavaGlobalRef(const JavaRef<T>& other) { Reset(other); }
// Create a new global reference to the object.
// Deprecated. Don't use bare jobjects; use a JavaRef as the input.
ScopedJavaGlobalRef(JNIEnv* env, T obj) { Reset(env, obj); }
~ScopedJavaGlobalRef() { Reset(); }
// Null assignment, for disambiguation.
ScopedJavaGlobalRef& operator=(std::nullptr_t) {
Reset();
return *this;
}
// Copy assignment.
ScopedJavaGlobalRef& operator=(const ScopedJavaGlobalRef& other) {
Reset(other);
return *this;
}
// Copy conversion assignment.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef& operator=(const ScopedJavaGlobalRef<U>& other) {
Reset(other);
return *this;
}
// Move assignment.
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef& operator=(ScopedJavaGlobalRef<U>&& other) {
Reset();
JavaRef<T>::steal(std::move(other));
return *this;
}
// Assignment for other JavaRef types.
ScopedJavaGlobalRef& operator=(const JavaRef<T>& other) {
Reset(other);
return *this;
}
void Reset() { JavaRef<T>::ResetGlobalRef(); }
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U, T>>>
void Reset(const ScopedJavaGlobalRef<U>& other) {
Reset(nullptr, other.obj());
}
void Reset(const JavaRef<T>& other) { Reset(nullptr, other.obj()); }
// Deprecated. You can just use Reset(const JavaRef&).
void Reset(JNIEnv* env, const JavaParamRef<T>& other) {
Reset(env, other.obj());
}
// Deprecated. Don't use bare jobjects; use a JavaRef as the input.
void Reset(JNIEnv* env, T obj) { JavaRef<T>::SetNewGlobalRef(env, obj); }
// Releases the global reference to the caller. The caller *must* delete the
// global reference when it is done with it. Note that calling a Java method
// is *not* a transfer of ownership and Release() should not be used.
T Release() { return static_cast<T>(JavaRef<T>::ReleaseInternal()); }
};
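// Illustrative sketch (not part of the original header): promotes a local
// reference to a global one so the Java object can be kept beyond the current
// stack frame and shared across threads, per the class comment above.
inline void ExamplePromoteToGlobal(const JavaRef<jobject>& local,
                                   ScopedJavaGlobalRef<jobject>* storage) {
  // Reset(const JavaRef&) creates a new global reference to the same object,
  // releasing any global reference |storage| previously held.
  storage->Reset(local);
}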
// Wrapper for a jobjectArray which supports input iteration, allowing Java
// arrays to be iterated over with a range-based for loop, or used with
// <algorithm> functions that accept input iterators.
//
// The iterator returns each object in the array in turn, wrapped in a
// ScopedJavaLocalRef<T>. T will usually be jobject, but if you know that the
// array contains a more specific type (such as jstring) you can use that
// instead. This does not check the type at runtime!
//
// The wrapper holds a local reference to the array and only queries the size of
// the array once, so must only be used as a stack-based object from the current
// thread.
//
// Note that this does *not* update the contents of the array if you mutate the
// returned ScopedJavaLocalRef.
template <typename T>
class JavaObjectArrayReader {
public:
class iterator {
public:
// We can only be an input iterator, as all richer iterator types must
// implement the multipass guarantee (always returning the same object for
// the same iterator position), which is not practical when returning
// temporary objects.
using iterator_category = std::input_iterator_tag;
using difference_type = ptrdiff_t;
using value_type = ScopedJavaLocalRef<T>;
// It doesn't make sense to return a reference type as the iterator creates
// temporary wrapper objects when dereferenced. Fortunately, it's not
// required that input iterators actually use references, and defining it
// as value_type is valid.
using reference = value_type;
// This exists to make operator-> work as expected: its return value must
// resolve to an actual pointer (otherwise the compiler just keeps calling
// operator-> on the return value until it does), so we need an extra level
// of indirection. This is sometimes called an "arrow proxy" or similar, and
// this version is adapted from base/value_iterators.h.
class pointer {
public:
explicit pointer(const reference& ref) : ref_(ref) {}
pointer(const pointer& ptr) = default;
pointer& operator=(const pointer& ptr) = delete;
reference* operator->() { return &ref_; }
private:
reference ref_;
};
iterator(const iterator&) = default;
~iterator() = default;
iterator& operator=(const iterator&) = default;
bool operator==(const iterator& other) const {
JNI_ZERO_DCHECK(reader_ == other.reader_);
return i_ == other.i_;
}
bool operator!=(const iterator& other) const {
JNI_ZERO_DCHECK(reader_ == other.reader_);
return i_ != other.i_;
}
reference operator*() const {
JNI_ZERO_DCHECK(i_ < reader_->size_);
// JNIEnv functions return unowned local references; take ownership with
// Adopt so that ~ScopedJavaLocalRef will release it automatically later.
return value_type::Adopt(
reader_->array_.env_,
static_cast<T>(reader_->array_.env_->GetObjectArrayElement(
reader_->array_.obj(), i_)));
}
pointer operator->() const { return pointer(operator*()); }
iterator& operator++() {
JNI_ZERO_DCHECK(i_ < reader_->size_);
++i_;
return *this;
}
iterator operator++(int) {
iterator old = *this;
++*this;
return old;
}
private:
iterator(const JavaObjectArrayReader* reader, jsize i)
: reader_(reader), i_(i) {}
const JavaObjectArrayReader<T>* reader_;
jsize i_;
friend JavaObjectArrayReader;
};
JavaObjectArrayReader(const JavaRef<jobjectArray>& array) : array_(array) {
size_ = array_.env_->GetArrayLength(array_.obj());
}
// Copy constructor to allow returning it from JavaRef::ReadElements().
JavaObjectArrayReader(const JavaObjectArrayReader& other) = default;
// Assignment operator for consistency with copy constructor.
JavaObjectArrayReader& operator=(const JavaObjectArrayReader& other) =
default;
// Allow move constructor and assignment since this owns a local ref.
JavaObjectArrayReader(JavaObjectArrayReader&& other) = default;
JavaObjectArrayReader& operator=(JavaObjectArrayReader&& other) = default;
bool empty() const { return size_ == 0; }
jsize size() const { return size_; }
iterator begin() const { return iterator(this, 0); }
iterator end() const { return iterator(this, size_); }
private:
ScopedJavaLocalRef<jobjectArray> array_;
jsize size_;
friend iterator;
};
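// Illustrative sketch (not part of the original header): counts the non-null
// elements of a Java object array using the range-based iteration that
// JavaObjectArrayReader provides.
inline jsize ExampleCountNonNullElements(const JavaRef<jobjectArray>& array) {
  jsize count = 0;
  // Each dereference yields an owned ScopedJavaLocalRef<jobject> for the next
  // element, which is released automatically at the end of the iteration.
  for (ScopedJavaLocalRef<jobject> element : JavaObjectArrayReader<jobject>(array)) {
    if (element.obj() != nullptr) {
      ++count;
    }
  }
  return count;
}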
} // namespace jni_zero
#endif // JNI_ZERO_SCOPED_JAVA_REF_H_

File diff suppressed because it is too large

View file

@ -0,0 +1,385 @@
// This file was generated by
// //third_party/jni_zero/jni_zero.py
// For
// java.util.Collection
#ifndef java_util_Collection_JNI
#define java_util_Collection_JNI
#include <jni.h>
#include "third_party/jni_zero/jni_export.h"
#include "../../../../../../../third_party/jni_zero/jni_zero_internal.h"
// Class Accessors
#ifndef java_util_Collection_clazz_defined
#define java_util_Collection_clazz_defined
inline jclass java_util_Collection_clazz(JNIEnv* env) {
static const char kClassName[] = "java/util/Collection";
static std::atomic<jclass> cached_class;
return jni_zero::internal::LazyGetClass(env, kClassName, &cached_class);
}
#endif
namespace JNI_Collection {
// Native to Java functions
[[maybe_unused]] static jboolean Java_Collection_add(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"add",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_addAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"addAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static void Java_Collection_clear(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"clear",
"()V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id());
}
[[maybe_unused]] static jboolean Java_Collection_contains(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"contains",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_containsAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"containsAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_equals(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"equals",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jint Java_Collection_hashCode(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"hashCode",
"()I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_isEmpty(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"isEmpty",
"()Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Collection_iterator(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"iterator",
"()Ljava/util/Iterator;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Collection_parallelStream(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"parallelStream",
"()Ljava/util/stream/Stream;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_Collection_remove(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"remove",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_removeAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"removeAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_removeIf(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"removeIf",
"(Ljava/util/function/Predicate;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Collection_retainAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"retainAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jint Java_Collection_size(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"size",
"()I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Collection_spliterator(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"spliterator",
"()Ljava/util/Spliterator;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Collection_stream(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"stream",
"()Ljava/util/stream/Stream;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobjectArray> Java_Collection_toArray(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"toArray",
"()[Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
jobjectArray _ret2 = static_cast<jobjectArray>(_ret);
return jni_zero::ScopedJavaLocalRef<jobjectArray>(env, _ret2);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobjectArray> Java_Collection_toArray__java_util_function_IntFunction(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"toArray",
"(Ljava/util/function/IntFunction;)[Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj());
jobjectArray _ret2 = static_cast<jobjectArray>(_ret);
return jni_zero::ScopedJavaLocalRef<jobjectArray>(env, _ret2);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobjectArray> Java_Collection_toArray__ObjectArray(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobjectArray>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Collection_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"toArray",
"([Ljava/lang/Object;)[Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj());
jobjectArray _ret2 = static_cast<jobjectArray>(_ret);
return jni_zero::ScopedJavaLocalRef<jobjectArray>(env, _ret2);
}
} // namespace JNI_Collection
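// Illustrative sketch (hand-written, not generated): snapshots a
// java.util.Collection into a Java Object[] via the toArray() wrapper above
// and returns its length. Exception handling is intentionally omitted.
[[maybe_unused]] static jsize JNI_Collection_SnapshotLength(
    JNIEnv* env,
    const jni_zero::JavaRef<jobject>& collection) {
  jni_zero::ScopedJavaLocalRef<jobjectArray> snapshot =
      JNI_Collection::Java_Collection_toArray(env, collection);
  if (snapshot.obj() == nullptr) {
    return 0;  // The class check failed or the Java call returned null.
  }
  return env->GetArrayLength(snapshot.obj());
}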
#endif // java_util_Collection_JNI

View file

@ -0,0 +1,994 @@
// This file was generated by
// //third_party/jni_zero/jni_zero.py
// For
// java.util.List
#ifndef java_util_List_JNI
#define java_util_List_JNI
#include <jni.h>
#include "third_party/jni_zero/jni_export.h"
#include "../../../../../../../third_party/jni_zero/jni_zero_internal.h"
// Class Accessors
#ifndef java_util_List_clazz_defined
#define java_util_List_clazz_defined
inline jclass java_util_List_clazz(JNIEnv* env) {
static const char kClassName[] = "java/util/List";
static std::atomic<jclass> cached_class;
return jni_zero::internal::LazyGetClass(env, kClassName, &cached_class);
}
#endif
namespace JNI_List {
// Native to Java functions
[[maybe_unused]] static jboolean Java_List_add(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"add",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static void Java_List_add(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"add",
"(ILjava/lang/Object;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), as_jint(p0), p1.obj());
}
[[maybe_unused]] static jboolean Java_List_addAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"addAll",
"(ILjava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), as_jint(p0), p1.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_List_addAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"addAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static void Java_List_addFirst(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"addFirst",
"(Ljava/lang/Object;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static void Java_List_addLast(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"addLast",
"(Ljava/lang/Object;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static void Java_List_clear(JNIEnv* env, const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"clear",
"()V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id());
}
[[maybe_unused]] static jboolean Java_List_contains(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"contains",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_List_containsAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"containsAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_copyOf(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"copyOf",
"(Ljava/util/Collection;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_List_equals(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"equals",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_get(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"get",
"(I)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), as_jint(p0));
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_getFirst(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"getFirst",
"()Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_getLast(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"getLast",
"()Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jint Java_List_hashCode(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"hashCode",
"()I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jint Java_List_indexOf(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"indexOf",
"(Ljava/lang/Object;)I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_List_isEmpty(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"isEmpty",
"()Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_iterator(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"iterator",
"()Ljava/util/Iterator;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jint Java_List_lastIndexOf(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"lastIndexOf",
"(Ljava/lang/Object;)I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_listIterator(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"listIterator",
"()Ljava/util/ListIterator;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_listIterator(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"listIterator",
"(I)Ljava/util/ListIterator;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), as_jint(p0));
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(JNIEnv* env) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"()Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of__Object(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_of__ObjectArray(
JNIEnv* env,
const jni_zero::JavaRef<jobjectArray>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"([Ljava/lang/Object;)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_List_remove__Object(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"remove",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_remove__int(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"remove",
"(I)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), as_jint(p0));
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_List_removeAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"removeAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_removeFirst(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"removeFirst",
"()Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_removeLast(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"removeLast",
"()Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static void Java_List_replaceAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"replaceAll",
"(Ljava/util/function/UnaryOperator;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static jboolean Java_List_retainAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"retainAll",
"(Ljava/util/Collection;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_reversed(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"reversed",
"()Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_reversed1(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"reversed",
"()Ljava/util/SequencedCollection;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_set(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"set",
"(ILjava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), as_jint(p0), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jint Java_List_size(JNIEnv* env, const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"size",
"()I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static void Java_List_sort(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"sort",
"(Ljava/util/Comparator;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_spliterator(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"spliterator",
"()Ljava/util/Spliterator;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_List_subList(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
JniIntWrapper p0,
JniIntWrapper p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"subList",
"(II)Ljava/util/List;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), as_jint(p0), as_jint(p1));
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobjectArray> Java_List_toArray(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"toArray",
"()[Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
jobjectArray _ret2 = static_cast<jobjectArray>(_ret);
return jni_zero::ScopedJavaLocalRef<jobjectArray>(env, _ret2);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobjectArray> Java_List_toArray(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobjectArray>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_List_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"toArray",
"([Ljava/lang/Object;)[Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj());
jobjectArray _ret2 = static_cast<jobjectArray>(_ret);
return jni_zero::ScopedJavaLocalRef<jobjectArray>(env, _ret2);
}
} // namespace JNI_List
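// Illustrative sketch (hand-written, not generated): returns the first
// element of |list|, or a null reference when the list is empty, combining
// the isEmpty() and getFirst() wrappers above. Exception handling is omitted.
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> JNI_List_FirstOrNull(
    JNIEnv* env,
    const jni_zero::JavaRef<jobject>& list) {
  if (JNI_List::Java_List_isEmpty(env, list)) {
    // A default-constructed ScopedJavaLocalRef wraps no object.
    return jni_zero::ScopedJavaLocalRef<jobject>();
  }
  return JNI_List::Java_List_getFirst(env, list);
}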
#endif // java_util_List_JNI

View file

@ -0,0 +1,950 @@
// This file was generated by
// //third_party/jni_zero/jni_zero.py
// For
// java.util.Map
#ifndef java_util_Map_JNI
#define java_util_Map_JNI
#include <jni.h>
#include "third_party/jni_zero/jni_export.h"
#include "../../../../../../../third_party/jni_zero/jni_zero_internal.h"
// Class Accessors
#ifndef java_util_Map_clazz_defined
#define java_util_Map_clazz_defined
inline jclass java_util_Map_clazz(JNIEnv* env) {
static const char kClassName[] = "java/util/Map";
static std::atomic<jclass> cached_class;
return jni_zero::internal::LazyGetClass(env, kClassName, &cached_class);
}
#endif
namespace JNI_Map {
// Native to Java functions
[[maybe_unused]] static void Java_Map_clear(JNIEnv* env, const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"clear",
"()V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id());
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_compute(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"compute",
"(Ljava/lang/Object;Ljava/util/function/BiFunction;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_computeIfAbsent(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"computeIfAbsent",
"(Ljava/lang/Object;Ljava/util/function/Function;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_computeIfPresent(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"computeIfPresent",
"(Ljava/lang/Object;Ljava/util/function/BiFunction;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_Map_containsKey(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"containsKey",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jboolean Java_Map_containsValue(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"containsValue",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_copyOf(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"copyOf",
"(Ljava/util/Map;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_entry(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"entry",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map$Entry;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_entrySet(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"entrySet",
"()Ljava/util/Set;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_Map_equals(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"equals",
"(Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj());
return _ret;
}
[[maybe_unused]] static void Java_Map_forEach(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"forEach",
"(Ljava/util/function/BiConsumer;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_get(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"get",
"(Ljava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_getOrDefault(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"getOrDefault",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jint Java_Map_hashCode(JNIEnv* env, const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"hashCode",
"()I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jboolean Java_Map_isEmpty(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"isEmpty",
"()Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_keySet(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"keySet",
"()Ljava/util/Set;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_merge(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"merge",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/util/function/BiFunction;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(
obj.obj(),
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(JNIEnv* env) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"()Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9,
const jni_zero::JavaRef<jobject>& p10,
const jni_zero::JavaRef<jobject>& p11) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj(),
p10.obj(),
p11.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9,
const jni_zero::JavaRef<jobject>& p10,
const jni_zero::JavaRef<jobject>& p11,
const jni_zero::JavaRef<jobject>& p12,
const jni_zero::JavaRef<jobject>& p13) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj(),
p10.obj(),
p11.obj(),
p12.obj(),
p13.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9,
const jni_zero::JavaRef<jobject>& p10,
const jni_zero::JavaRef<jobject>& p11,
const jni_zero::JavaRef<jobject>& p12,
const jni_zero::JavaRef<jobject>& p13,
const jni_zero::JavaRef<jobject>& p14,
const jni_zero::JavaRef<jobject>& p15) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj(),
p10.obj(),
p11.obj(),
p12.obj(),
p13.obj(),
p14.obj(),
p15.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9,
const jni_zero::JavaRef<jobject>& p10,
const jni_zero::JavaRef<jobject>& p11,
const jni_zero::JavaRef<jobject>& p12,
const jni_zero::JavaRef<jobject>& p13,
const jni_zero::JavaRef<jobject>& p14,
const jni_zero::JavaRef<jobject>& p15,
const jni_zero::JavaRef<jobject>& p16,
const jni_zero::JavaRef<jobject>& p17) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj(),
p10.obj(),
p11.obj(),
p12.obj(),
p13.obj(),
p14.obj(),
p15.obj(),
p16.obj(),
p17.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_of(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2,
const jni_zero::JavaRef<jobject>& p3,
const jni_zero::JavaRef<jobject>& p4,
const jni_zero::JavaRef<jobject>& p5,
const jni_zero::JavaRef<jobject>& p6,
const jni_zero::JavaRef<jobject>& p7,
const jni_zero::JavaRef<jobject>& p8,
const jni_zero::JavaRef<jobject>& p9,
const jni_zero::JavaRef<jobject>& p10,
const jni_zero::JavaRef<jobject>& p11,
const jni_zero::JavaRef<jobject>& p12,
const jni_zero::JavaRef<jobject>& p13,
const jni_zero::JavaRef<jobject>& p14,
const jni_zero::JavaRef<jobject>& p15,
const jni_zero::JavaRef<jobject>& p16,
const jni_zero::JavaRef<jobject>& p17,
const jni_zero::JavaRef<jobject>& p18,
const jni_zero::JavaRef<jobject>& p19) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"of",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(
clazz,
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj(),
p3.obj(),
p4.obj(),
p5.obj(),
p6.obj(),
p7.obj(),
p8.obj(),
p9.obj(),
p10.obj(),
p11.obj(),
p12.obj(),
p13.obj(),
p14.obj(),
p15.obj(),
p16.obj(),
p17.obj(),
p18.obj(),
p19.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_ofEntries(
JNIEnv* env,
const jni_zero::JavaRef<jobjectArray>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, clazz, clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_STATIC>(
env,
clazz,
"ofEntries",
"([Ljava/util/Map$Entry;)Ljava/util/Map;",
&cached_method_id);
auto _ret = env->CallStaticObjectMethod(clazz, call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_put(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static void Java_Map_putAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"putAll",
"(Ljava/util/Map;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_putIfAbsent(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"putIfAbsent",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_Map_remove(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"remove",
"(Ljava/lang/Object;Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_remove(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"remove",
"(Ljava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static jboolean Java_Map_replace(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1,
const jni_zero::JavaRef<jobject>& p2) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, false);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"replace",
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)Z",
&cached_method_id);
auto _ret = env->CallBooleanMethod(
obj.obj(),
call_context.method_id(),
p0.obj(),
p1.obj(),
p2.obj());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_replace(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0,
const jni_zero::JavaRef<jobject>& p1) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"replace",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id(), p0.obj(), p1.obj());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
[[maybe_unused]] static void Java_Map_replaceAll(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj,
const jni_zero::JavaRef<jobject>& p0) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"replaceAll",
"(Ljava/util/function/BiFunction;)V",
&cached_method_id);
env->CallVoidMethod(obj.obj(), call_context.method_id(), p0.obj());
}
[[maybe_unused]] static jint Java_Map_size(JNIEnv* env, const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, 0);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"size",
"()I",
&cached_method_id);
auto _ret = env->CallIntMethod(obj.obj(), call_context.method_id());
return _ret;
}
[[maybe_unused]] static jni_zero::ScopedJavaLocalRef<jobject> Java_Map_values(
JNIEnv* env,
const jni_zero::JavaRef<jobject>& obj) {
static std::atomic<jmethodID> cached_method_id(nullptr);
jclass clazz = java_util_Map_clazz(env);
CHECK_CLAZZ(env, obj.obj(), clazz, nullptr);
jni_zero::internal::JniJavaCallContext<true> call_context;
call_context.Init<jni_zero::MethodID::TYPE_INSTANCE>(
env,
clazz,
"values",
"()Ljava/util/Collection;",
&cached_method_id);
auto _ret = env->CallObjectMethod(obj.obj(), call_context.method_id());
return jni_zero::ScopedJavaLocalRef<jobject>(env, _ret);
}
} // namespace JNI_Map
#endif // java_util_Map_JNI
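The generated helpers above all follow the same shape: look up java.util.Map once, cache the jmethodID in a function-local std::atomic, and forward to the matching JNIEnv Call*Method. A minimal usage sketch follows; it is illustrative only and not part of the generated header (PutIfMissing is a hypothetical helper, and it assumes the caller already holds a valid JNIEnv* plus JavaRef handles to a Map instance, a key, and a value):

// Hypothetical usage sketch, not generated code.
static bool PutIfMissing(JNIEnv* env,
                         const jni_zero::JavaRef<jobject>& map,
                         const jni_zero::JavaRef<jobject>& key,
                         const jni_zero::JavaRef<jobject>& value) {
  // Map.containsKey(key): the wrapper resolves and caches the jmethodID on
  // first use, then issues CallBooleanMethod.
  if (JNI_Map::Java_Map_containsKey(env, map, key)) {
    return false;  // Key already present; leave the existing mapping alone.
  }
  // Map.put(key, value): the previous mapping (or null) comes back as a
  // ScopedJavaLocalRef that releases its local reference at end of scope.
  jni_zero::ScopedJavaLocalRef<jobject> previous =
      JNI_Map::Java_Map_put(env, map, key, value);
  // No earlier mapping was observed above, so put() should return null here.
  return previous.obj() == nullptr;
}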

View file

@ -0,0 +1,253 @@
Changes in 1.4.15-stable (5 January 2015)
o Avoid integer overflow bugs in evbuffer_add() and related functions. See CVE-2014-6272 advisory for more information. (d49bc0e88b81a5812116074dc007f1db0ca1eecd)
o Pass flags to fcntl(F_SETFL) as int, not long (b3d0382)
o Backport and tweak the LICENSE file for 1.4 (8a5ebd3)
o set close-on-exec bit for file descriptors created by dns subsystem (9985231 Ralf Schmitt)
o Replace unused case of FD_CLOSEONEXEC with a proper null statement. (44f04a2)
o Fix kqueue correctness test on x86_64 (1c25b07)
o Avoid deadlock when activating signals. (e0e6958)
o Backport doc fix for evhttp_bind_socket. (95b71d0 Marco)
o Fix an issue with forking and signal socketpairs in select/poll backends (f0ff765)
o Fix compilation on Visual Studio 2010 (53c47c2 VDm)
o Defensive programming to prevent (hopefully impossible) stack-stomping (2d8cf0b)
o Check for POLLERR, POLLHUP and POLLNVAL for Solaris event ports (353b4ac Trond Norbye)
o Fix a bug that could allow dns requests with duplicate tx ids (e50ba5b)
o Avoid truncating huge values for content-length (1d6e30e)
o Take generated files out of git; add correct m4 magic for libtool to auto* files (7cf794b)
o Prefer autoreconf -ivf to manual autogen.sh (823d9be)
Changes in 1.4.14b-stable
o Set the VERSION_INFO correctly for 1.4.14
Changes in 1.4.14-stable
o Add a .gitignore file for the 1.4 branch. (d014edb)
o Backport evbuffer_readln(). (b04cc60 Nicholas Marriott)
o Make the evbuffer_readln backport follow the current API (c545485)
o Valgrind fix: Clear struct kevent before checking for OSX bug. (5713d5d William Ahern)
o Fix a crash when reading badly formatted resolv.conf (5b10d00 Yasuoka Masahiko)
o Fix memory-leak of signal handler array with kqueue. [backport] (01f3775)
o Update sample/signal-test.c to use newer APIs and not leak. (891765c Evan Jones)
o Correct all versions in 1.4 branch (ac0d213)
o Make evutil_make_socket_nonblocking() leave any other flags alone. (81c26ba Jardel Weyrich)
o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (5f2e250 Jardel Weyrich)
o Correct a debug message in evhttp_parse_request_line (35df59e)
o Merge branch 'readln-backport' into patches-1.4 (8771d5b)
o Do not send an HTTP error when we've already closed or responded. (4fd2dd9 Pavel Plesov)
o Re-add event_sigcb; some old code _was_ still using it. :( (bd03d06)
o Make Libevent 1.4 build on win32 with Unicode enabled. (bce58d6 Brodie Thiesfield)
o Distribute nmake makefile for 1.4 (20d706d)
o do not fail while sending on http connections the client closed. (5c8b446)
o make evhttp_send() safe against terminated connections, too (01ea0c5)
o Fix a free(NULL) in min_heap.h (2458934)
o Fix memory leak when setting up priorities; reported by Alexander Drozdov (cb1a722)
o Clean up properly when adding a signal handler fails. (ae6ece0 Gilad Benjamini)
o Do not abort HTTP requests missing a reason string. (29d7b32 Pierre Phaneuf)
o Fix compile warning in http.c (906d573)
o Define _REENTRANT as needed on Solaris, elsewhere (6cbea13)
Changes in 1.4.13-stable:
o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it. Fixes bug 2841177; found by Alexander Pronchenkov.
o Do not allocate the maximum event queue and fd array for the epoll backend at startup. Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them. Saves up to 512K per epoll-based event_base. Resolves bug 2839240.
o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h
o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec
o Rename our replacement compat/sys/_time.h header to avoid a build conflict on HPUX; reported by Kathryn Hogg.
o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian.
o Fix a problem with excessive memory allocation when using multiple event priorities.
o When running set[ug]id, don't check the environment. Based on a patch from OpenBSD.
Changes in 1.4.12-stable:
o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
o Fix an obscure timing-dependent, allocator-dependent crash in the evdns code.
o Use __VA_ARGS__ syntax for varargs macros in event_rpcgen when compiler is not GCC.
o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
o Fix another pair of fencepost bugs in epoll.c. [Patch from Adam Langley.]
o Do not break evdns connections to nameservers when our IP changes.
o Set truncated flag correctly in evdns server replies.
o Disable strict aliasing with GCC: our code is not compliant with it.
Changes in 1.4.11-stable:
o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
o Remove the limit on size of HTTP headers by removing static buffers.
o Fix a nasty dangling pointer bug in epoll.c that could occur after epoll_recalc(). [Patch from Kevin Springborn]
o Distribute Win32-Code/event-config.h, not ./event-config.h
Changes in 1.4.10-stable:
o clean up buffered http connection data on reset; reported by Brian O'Kelley
o bug fix and potential race condition in signal handling; from Alexander Drozdov
o rename the Solaris event ports backend to evport
o support compilation on Haiku
o fix signal processing when a signal callback delivers a signal; from Alexander Drozdov
o const-ify some arguments to evdns functions.
o off-by-one error in epoll_recalc; reported by Victor Goya
o include Doxyfile in tar ball; from Jeff Garzik
o correctly parse queries with encoded \r, \n or + characters
Changes in 1.4.9-stable:
o event_add would not return error for some backends; from Dean McNamee
o Clear the timer cache on entering the event loop; reported by Victor Chang
o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
o Fix a typo in setting the global event base; reported by lance.
o Fix a memory leak when reading multi-line headers
o Fix a memory leak by not running explicit close detection for server connections
Changes in 1.4.8-stable:
o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
o Fix a merge problem in which name_from_addr returned pointers to the stack; found by Jiang Hong.
o Do not remove Accept-Encoding header
Changes in 1.4.7-stable:
o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
Changes in 1.4.6-stable:
o evutil.h now includes <stdarg.h> directly
o switch all uses of [v]snprintf over to evutil
o Correct handling of trailing headers in chunked replies; from Scott Lamb.
o Support multi-line HTTP headers; based on a patch from Moshe Litvin
o Reject negative Content-Length headers; anonymous bug report
o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report
o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
o Deal with evbuffer_read() returning -1 on EINTR|EAGAIN; from Adam Langley.
o Fix a bug in which the DNS server would incorrectly set the type of a cname reply to a.
o Fix a bug where setting the timeout on a bufferevent would not take effect if the event was already pending.
o Fix a memory leak when using signals for some event bases; reported by Alexander Drozdov.
o Add libevent.vcproj file to distribution to help with Windows build.
o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.
o Fix off-by-one errors in devpoll; from Ian Bell
o Make event_add not change any state if it fails; reported by Ian Bell.
o Do not warn on accept when errno is either EAGAIN or EINTR
Changes in 1.4.5-stable:
o Fix connection keep-alive behavior for HTTP/1.0
o Fix use of freed memory in event_reinit; pointed out by Peter Postma
o Constify struct timeval * where possible; pointed out by Forest Wilkinson
o allow min_heap_erase to be called on removed members; from liusifan.
o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
o Do not use SO_REUSEADDR when connecting
o Fix Windows build
o Fix a bug in event_rpcgen when generating fixed-sized entries
Changes in 1.4.4-stable:
o Correct the documentation on buffer printf functions.
o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
o Fix a potential stack corruption bug in tagging on 64-bit CPUs.
o expose bufferevent_setwatermark via header files and fix high watermark on read
o fix a bug in bufferevent read water marks and add a test for them
o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
o reduce system calls for getting current time by caching it.
o fix evhttp_bind_socket() so that multiple sockets can be bound by the same http server.
o Build test directory correctly with CPPFLAGS set.
o Fix build under Visual C++ 2005.
o Expose evhttp_accept_socket() API.
o Merge windows gettimeofday() replacement into a new evutil_gettimeofday() function.
o Fix autoconf script behavior on IRIX.
o Make sure winsock2.h include always comes before windows.h include.
Changes in 1.4.3-stable:
o include Content-Length in reply for HTTP/1.0 requests with keep-alive
o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
o make event_rpcgen.py generate code that includes event-config.h; reported by Sam Banks.
o make event methods static so that they are not exported; from Andrei Nigmatulin
o make RPC replies use application/octet-stream as mime type
o do not delete uninitialized timeout event in evdns
Changes in 1.4.2-rc:
o remove pending timeouts on event_base_free()
o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
o devpoll and evport need reinit; tested by W.C.A Wijngaards
o event_base_get_method; from Springande Ulv
o Send CRLF after each chunk in HTTP output, for compliance with RFC 2616. Patch from "propanbutan". Fixes bug 1894184.
o Add an int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb.
o Allow regression code to build even without Python installed
o remove NDEBUG ifdefs from evdns.c
o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
o detect integer types properly on platforms without stdint.h
o Remove "AM_MAINTAINER_MODE" declaration in configure.in: now makefiles and configure should get re-generated automatically when Makefile.am or configure.in chanes.
o do not insert event into list when evsel->add fails
Changes in 1.4.1-beta:
o free minheap on event_base_free(); from Christopher Layne
o debug cleanups in signal.c; from Christopher Layne
o provide event_base_new() that does not set the current_base global
o bufferevent_write now uses a const source argument; report from Charles Kerr
o better documentation for event_base_loopexit; from Scott Lamb.
o Make kqueue have the same behavior as other backends when a signal is caught between event_add() and event_loop(). Previously, it would catch and ignore such signals.
o Make kqueue restore signal handlers correctly when event_del() is called.
o provide event_reinit() to reinitialize an event_base after fork
o small improvements to evhttp documentation
o always generate Date and Content-Length headers for HTTP/1.1 replies
o set the correct event base for HTTP close events
o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb
o Removed obsoleted recalc code
o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
o fix a bug with event_rpcgen for integers
o move EV_PERSIST handling out of the event backends
o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
o prefix {encode,decode}_tag functions with evtag to avoid collisions
o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.)
o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
o removed linger from http server socket; reported by Ilya Martynov
o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
o demote most http warnings to debug messages
o Fix Solaris compilation; from Magne Mahre
o Add a "Date" header to HTTP responses, as required by HTTP 1.1.
o Support specifying the local address of an evhttp_connection using set_local_address
o Fix a memory leak in which failed HTTP connections would not free the request object
o Make adding of array members in event_rpcgen more efficient, by doubling memory allocation
o Fix a memory leak in the DNS server
o Fix compilation when DNS_USE_OPENSSL_FOR_ID is enabled
o Fix buffer size and string generation in evdns_resolve_reverse_ipv6().
o Respond to nonstandard DNS queries with "NOTIMPL" rather than by ignoring them.
o In DNS responses, the CD flag should be preserved, not the TC flag.
o Fix http.c to compile properly with USE_DEBUG; from Christopher Layne
o Handle NULL timeouts correctly on Solaris; from Trond Norbye
o Recalculate pending events properly when reallocating event array on Solaris; from Trond Norbye
o Add Doxygen documentation to header files; from Mark Heily
o Add a evdns_set_transaction_id_fn() function to override the default
transaction ID generation code.
o Add an evutil module (with header evutil.h) to implement our standard cross-platform hacks, on the theory that somebody else would like to use them too.
o Fix signals implementation on windows.
o Fix http module on windows to close sockets properly.
o Make autogen.sh script run correctly on systems where /bin/sh isn't bash. (Patch from Trond Norbye, rewritten by Magne Mahre and then Hannah Schroeter.)
o Skip calling gettime() in timeout_process if we are not in fact waiting for any events. (Patch from Trond Norbye)
o Make test subdirectory compile under mingw.
o Fix win32 buffer.c behavior so that it is correct for sockets (which do not like ReadFile and WriteFile).
o Make the test.sh script run unit tests for the evpoll method.
o Make the entire evdns.h header enclosed in "extern C" as appropriate.
o Fix implementation of strsep on platforms that lack it
o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better. Original patch by Lubomir Marinov.
o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa
o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication.
o use a min heap instead of a red-black tree for timeouts; as a result finding the min is a O(1) operation now; from Maxim Yegorushkin
o associate an event base with an rpc pool
o added two additional libraries: libevent_core and libevent_extra in addition to the regular libevent. libevent_core contains only the event core whereas libevent_extra contains dns, http and rpc support
o Begin using libtool's library versioning support correctly. If we don't mess up, this will more or less guarantee binaries linked against old versions of libevent continue working when we make changes to libevent that do not break backward compatibility.
o Fix evhttp.h compilation when TAILQ_ENTRY is not defined.
o Small code cleanups in epoll_dispatch().
o Increase the maximum number of addresses read from a packet in evdns to 32.
o Remove support for the rtsig method: it hasn't compiled for a while, and nobody seems to miss it very much. Let us know if there's a good reason to put it back in.
o Rename the "class" field in evdns_server_request to dns_question_class, so that it won't break compilation under C++. Use a macro so that old code won't break. Mark the macro as deprecated.
o Fix DNS unit tests so that having a DNS server with broken IPv6 support is no longer cause for aborting the unit tests.
o Make event_base_free() succeed even if there are pending non-internal events on a base. This may still leak memory and fds, but at least it no longer crashes.
o Post-process the config.h file into a new, installed event-config.h file that we can install, and whose macros will be safe to include in header files.
o Remove the long-deprecated acconfig.h file.
o Do not require #include <sys/types.h> before #include <event.h>.
o Add new evutil_timer* functions to wrap (or replace) the regular timeval manipulation functions.
o Fix many build issues when using the Microsoft C compiler.
o Remove a bash-ism in autogen.sh
o When calling event_del on a signal, restore the signal handler's previous value rather than setting it to SIG_DFL. Patch from Christopher Layne.
o Make the logic for active events work better with internal events; patch from Christopher Layne.
o We do not need to specially remove a timeout before calling event_del; patch from Christopher Layne.

View file

@ -0,0 +1,53 @@
Libevent is available for use under the following license, commonly known
as the 3-clause (or "modified") BSD license:
==============================
Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================
Portions of Libevent are based on works by others, also made available by
them under the three-clause BSD license above. The copyright notices are
available in the corresponding source files; the license is as above. Here's
a list:
log.c:
Copyright (c) 2000 Dug Song <dugsong@monkey.org>
Copyright (c) 1993 The Regents of the University of California.
strlcpy.c:
Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
win32.c:
Copyright (c) 2003 Michael A. Davis <mike@datanerds.net>
evport.c:
Copyright (c) 2007 Sun Microsystems
min_heap.h:
Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
tree.h:
Copyright 2002 Niels Provos <provos@citi.umich.edu>

View file

@ -0,0 +1,57 @@
To build libevent, type
$ ./configure && make
(If you got libevent from the subversion repository, you will
first need to run the included "autogen.sh" script in order to
generate the configure script.)
Install as root via
# make install
You can run the regression tests by
$ make verify
Before reporting any problems, please run the regression tests.
To enable low-level tracing, build the library as:
CFLAGS=-DUSE_DEBUG ./configure [...]
Acknowledgements:
-----------------
The following people have helped with suggestions, ideas, code or
fixing bugs:
Alejo
Weston Andros Adamson
William Ahern
Stas Bekman
Andrew Danforth
Mike Davis
Shie Erlich
Alexander von Gernler
Artur Grabowski
Aaron Hopkins
Claudio Jeker
Scott Lamb
Adam Langley
Philip Lewis
David Libenzi
Nick Mathewson
Andrey Matveev
Richard Nyberg
Jon Oberheide
Phil Oleson
Dave Pacheco
Tassilo von Parseval
Pierre Phaneuf
Jon Poland
Bert JW Regeer
Dug Song
Taral
If I have forgotten your name, please contact me.

View file

@ -0,0 +1,40 @@
Name: libevent
URL: http://libevent.org/
Version: 1.4.15
License: BSD
Security Critical: yes
Local Modifications:
Rather than use libevent's own build system, we just build a Chrome
static library using GYP.
1) Run configure and "make event-config.h" on Linux, FreeBSD, Solaris,
and Mac and copy config.h and event-config.h to linux/, freebsd/,
solaris/, and mac/ respectively.
2) Add libevent.gyp.
3) chromium.patch is applied to make the following changes:
- Allow libevent to be used without being installed by changing <...>
#includes to "...".
- Fix a race condition in event_del.
- Optimistically assume CLOCK_MONOTONIC is available and fall back if it
fails, rather than explicitly testing for it (a sketch of this pattern
follows after this list).
- Remove an unneeded variable that causes a -Werror build failure.
- Add an #ifndef to fix a preprocessor redefined -Werror build failure.
- Revert the patch from http://sourceforge.net/p/levent/bugs/223/ that
introduces use-after-free memory corruption when an event callback frees
the struct event memory.
- Remove deprecated global variables, event_sigcb and event_gotsig
(essentially unused) that trigger tsan errors. (crbug/605894)
4) The directories WIN32-Code and WIN32-Prj are not included.
5) The configs for android were copied from Linux's, which are very close to
the Android ones except for HAVE_FD_MASK and HAVE_STRLCPY.
6) Add files to support building with the PNaCl toolchain. Added
libevent_nacl_nonsfi.gyp for build rule. nacl_nonsfi/config.h and
nacl_nonsfi/event-config.h are derived from linux/ counterparts.
nacl_nonsfi/random.c is also added to provide the random() function,
which is missing in the newlib-based PNaCl toolchain.
7) Stub out signal.c for nacl_helper_nonsfi. socketpair() will be prohibited
by sandbox in nacl_helper_nonsfi.
8) Remove an unnecessary workaround for OS X 10.4 from kqueue.c. It was causing
problems on macOS Sierra.
9) Change buffer.c to not redefine _GNU_SOURCE.
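As an illustration of the CLOCK_MONOTONIC change described in item 3, here is a minimal sketch of the "optimistically assume, then fall back" pattern. This is not the actual chromium.patch hunk; monotonic_gettime is a hypothetical helper, and it assumes a POSIX system where clock_gettime() and gettimeofday() are available:

#include <stddef.h>
#include <sys/time.h>
#include <time.h>

/* Hypothetical sketch, not taken from chromium.patch. */
static int
monotonic_gettime(struct timeval *tv)
{
#if defined(CLOCK_MONOTONIC)
	struct timespec ts;
	/* Optimistic path: ask for the monotonic clock and trust it if the
	 * call succeeds at runtime. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = ts.tv_nsec / 1000;
		return 0;
	}
#endif
	/* Fallback: the monotonic clock is unavailable or the call failed,
	 * so use wall-clock time and accept that it may jump. */
	return gettimeofday(tv, NULL);
}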

View file

@ -0,0 +1,552 @@
/*
* Copyright (c) 2002, 2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#ifdef WIN32
#include <winsock2.h>
#include <windows.h>
#endif
#if defined(HAVE_VASPRINTF) && !defined(_GNU_SOURCE)
/* If we have vasprintf, we need to define this before we include stdio.h. */
#define _GNU_SOURCE
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "event.h"
#include "config.h"
#include "evutil.h"
#include "./log.h"
struct evbuffer *
evbuffer_new(void)
{
struct evbuffer *buffer;
buffer = calloc(1, sizeof(struct evbuffer));
return (buffer);
}
void
evbuffer_free(struct evbuffer *buffer)
{
if (buffer->orig_buffer != NULL)
free(buffer->orig_buffer);
free(buffer);
}
/*
* This is a destructive add. The data from one buffer moves into
* the other buffer.
*/
#define SWAP(x,y) do { \
(x)->buffer = (y)->buffer; \
(x)->orig_buffer = (y)->orig_buffer; \
(x)->misalign = (y)->misalign; \
(x)->totallen = (y)->totallen; \
(x)->off = (y)->off; \
} while (0)
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
int res;
/* Short cut for better performance */
if (outbuf->off == 0) {
struct evbuffer tmp;
size_t oldoff = inbuf->off;
/* Swap them directly */
SWAP(&tmp, outbuf);
SWAP(outbuf, inbuf);
SWAP(inbuf, &tmp);
/*
* Optimization comes with a price; we need to notify the
* buffer if necessary of the changes. oldoff is the amount
* of data that we transferred from inbuf to outbuf
*/
if (inbuf->off != oldoff && inbuf->cb != NULL)
(*inbuf->cb)(inbuf, oldoff, inbuf->off, inbuf->cbarg);
if (oldoff && outbuf->cb != NULL)
(*outbuf->cb)(outbuf, 0, oldoff, outbuf->cbarg);
return (0);
}
res = evbuffer_add(outbuf, inbuf->buffer, inbuf->off);
if (res == 0) {
/* We drain the input buffer on success */
evbuffer_drain(inbuf, inbuf->off);
}
return (res);
}
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
char *buffer;
size_t space;
size_t oldoff = buf->off;
int sz;
va_list aq;
/* make sure that at least some space is available */
if (evbuffer_expand(buf, 64) < 0)
return (-1);
for (;;) {
size_t used = buf->misalign + buf->off;
buffer = (char *)buf->buffer + buf->off;
assert(buf->totallen >= used);
space = buf->totallen - used;
#ifndef va_copy
#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
#endif
va_copy(aq, ap);
sz = evutil_vsnprintf(buffer, space, fmt, aq);
va_end(aq);
if (sz < 0)
return (-1);
if ((size_t)sz < space) {
buf->off += sz;
if (buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (sz);
}
if (evbuffer_expand(buf, sz + 1) == -1)
return (-1);
}
/* NOTREACHED */
}
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
int res = -1;
va_list ap;
va_start(ap, fmt);
res = evbuffer_add_vprintf(buf, fmt, ap);
va_end(ap);
return (res);
}
/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen)
{
size_t nread = datlen;
if (nread >= buf->off)
nread = buf->off;
memcpy(data, buf->buffer, nread);
evbuffer_drain(buf, nread);
return (nread);
}
/*
* Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
* The returned buffer needs to be freed by the caller.
*/
char *
evbuffer_readline(struct evbuffer *buffer)
{
u_char *data = EVBUFFER_DATA(buffer);
size_t len = EVBUFFER_LENGTH(buffer);
char *line;
unsigned int i;
for (i = 0; i < len; i++) {
if (data[i] == '\r' || data[i] == '\n')
break;
}
if (i == len)
return (NULL);
if ((line = malloc(i + 1)) == NULL) {
fprintf(stderr, "%s: out of memory\n", __func__);
return (NULL);
}
memcpy(line, data, i);
line[i] = '\0';
/*
* Some protocols terminate a line with '\r\n', so check for
* that, too.
*/
if ( i < len - 1 ) {
char fch = data[i], sch = data[i+1];
/* Drain one more character if needed */
if ( (sch == '\r' || sch == '\n') && sch != fch )
i += 1;
}
evbuffer_drain(buffer, i + 1);
return (line);
}
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
enum evbuffer_eol_style eol_style)
{
u_char *data = EVBUFFER_DATA(buffer);
u_char *start_of_eol, *end_of_eol;
size_t len = EVBUFFER_LENGTH(buffer);
char *line;
unsigned int i, n_to_copy, n_to_drain;
if (n_read_out)
*n_read_out = 0;
/* depending on eol_style, set start_of_eol to the first character
* in the newline, and end_of_eol to one after the last character. */
switch (eol_style) {
case EVBUFFER_EOL_ANY:
for (i = 0; i < len; i++) {
if (data[i] == '\r' || data[i] == '\n')
break;
}
if (i == len)
return (NULL);
start_of_eol = data+i;
++i;
for ( ; i < len; i++) {
if (data[i] != '\r' && data[i] != '\n')
break;
}
end_of_eol = data+i;
break;
case EVBUFFER_EOL_CRLF:
end_of_eol = memchr(data, '\n', len);
if (!end_of_eol)
return (NULL);
if (end_of_eol > data && *(end_of_eol-1) == '\r')
start_of_eol = end_of_eol - 1;
else
start_of_eol = end_of_eol;
end_of_eol++; /*point to one after the LF. */
break;
case EVBUFFER_EOL_CRLF_STRICT: {
u_char *cp = data;
while ((cp = memchr(cp, '\r', len-(cp-data)))) {
if (cp < data+len-1 && *(cp+1) == '\n')
break;
if (++cp >= data+len) {
cp = NULL;
break;
}
}
if (!cp)
return (NULL);
start_of_eol = cp;
end_of_eol = cp+2;
break;
}
case EVBUFFER_EOL_LF:
start_of_eol = memchr(data, '\n', len);
if (!start_of_eol)
return (NULL);
end_of_eol = start_of_eol + 1;
break;
default:
return (NULL);
}
n_to_copy = start_of_eol - data;
n_to_drain = end_of_eol - data;
if ((line = malloc(n_to_copy+1)) == NULL) {
event_warn("%s: out of memory\n", __func__);
return (NULL);
}
memcpy(line, data, n_to_copy);
line[n_to_copy] = '\0';
evbuffer_drain(buffer, n_to_drain);
if (n_read_out)
*n_read_out = (size_t)n_to_copy;
return (line);
}
/* Moves the data in an event buffer to the front of its allocated storage */
static void
evbuffer_align(struct evbuffer *buf)
{
memmove(buf->orig_buffer, buf->buffer, buf->off);
buf->buffer = buf->orig_buffer;
buf->misalign = 0;
}
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t)-1)
#endif
/* Expands the available space in the event buffer to at least datlen */
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
size_t used = buf->misalign + buf->off;
assert(buf->totallen >= used);
/* If we can fit all the data, then we don't have to do anything */
if (buf->totallen - used >= datlen)
return (0);
/* If we would need to overflow to fit this much data, we can't
* do anything. */
if (datlen > SIZE_MAX - buf->off)
return (-1);
/*
* If the misalignment fulfills our data needs, we just force an
* alignment to happen. Afterwards, we have enough space.
*/
if (buf->totallen - buf->off >= datlen) {
evbuffer_align(buf);
} else {
void *newbuf;
size_t length = buf->totallen;
size_t need = buf->off + datlen;
if (length < 256)
length = 256;
if (need < SIZE_MAX / 2) {
while (length < need) {
length <<= 1;
}
} else {
length = need;
}
if (buf->orig_buffer != buf->buffer)
evbuffer_align(buf);
if ((newbuf = realloc(buf->buffer, length)) == NULL)
return (-1);
buf->orig_buffer = buf->buffer = newbuf;
buf->totallen = length;
}
return (0);
}
int
evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen)
{
size_t used = buf->misalign + buf->off;
size_t oldoff = buf->off;
if (buf->totallen - used < datlen) {
if (evbuffer_expand(buf, datlen) == -1)
return (-1);
}
memcpy(buf->buffer + buf->off, data, datlen);
buf->off += datlen;
if (datlen && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (0);
}
void
evbuffer_drain(struct evbuffer *buf, size_t len)
{
size_t oldoff = buf->off;
if (len >= buf->off) {
buf->off = 0;
buf->buffer = buf->orig_buffer;
buf->misalign = 0;
goto done;
}
buf->buffer += len;
buf->misalign += len;
buf->off -= len;
done:
/* Tell someone about changes in this buffer */
if (buf->off != oldoff && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
}
/*
* Reads data from a file descriptor into a buffer.
*/
#define EVBUFFER_MAX_READ 4096
int
evbuffer_read(struct evbuffer *buf, int fd, int howmuch)
{
u_char *p;
size_t oldoff = buf->off;
int n = EVBUFFER_MAX_READ;
#if defined(FIONREAD)
#ifdef WIN32
long lng = n;
if (ioctlsocket(fd, FIONREAD, &lng) == -1 || (n=lng) <= 0) {
#else
if (ioctl(fd, FIONREAD, &n) == -1 || n <= 0) {
#endif
n = EVBUFFER_MAX_READ;
} else if (n > EVBUFFER_MAX_READ && n > howmuch) {
/*
* It's possible that a lot of data is available for
* reading. We do not want to exhaust resources
* before the reader has a chance to do something
* about it. If the reader does not tell us how much
		 * data we should read, we artificially limit it.
*/
if ((size_t)n > buf->totallen << 2)
n = buf->totallen << 2;
if (n < EVBUFFER_MAX_READ)
n = EVBUFFER_MAX_READ;
}
#endif
if (howmuch < 0 || howmuch > n)
howmuch = n;
/* If we don't have FIONREAD, we might waste some space here */
if (evbuffer_expand(buf, howmuch) == -1)
return (-1);
/* We can append new data at this point */
p = buf->buffer + buf->off;
#ifndef WIN32
n = read(fd, p, howmuch);
#else
n = recv(fd, p, howmuch, 0);
#endif
if (n == -1)
return (-1);
if (n == 0)
return (0);
buf->off += n;
/* Tell someone about changes in this buffer */
if (buf->off != oldoff && buf->cb != NULL)
(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
return (n);
}
int
evbuffer_write(struct evbuffer *buffer, int fd)
{
int n;
#ifndef WIN32
n = write(fd, buffer->buffer, buffer->off);
#else
n = send(fd, buffer->buffer, buffer->off, 0);
#endif
if (n == -1)
return (-1);
if (n == 0)
return (0);
evbuffer_drain(buffer, n);
return (n);
}
u_char *
evbuffer_find(struct evbuffer *buffer, const u_char *what, size_t len)
{
u_char *search = buffer->buffer, *end = search + buffer->off;
u_char *p;
while (search < end &&
(p = memchr(search, *what, end - search)) != NULL) {
if (p + len > end)
break;
if (memcmp(p, what, len) == 0)
return (p);
search = p + 1;
}
return (NULL);
}
void evbuffer_setcb(struct evbuffer *buffer,
void (*cb)(struct evbuffer *, size_t, size_t, void *),
void *cbarg)
{
buffer->cb = cb;
buffer->cbarg = cbarg;
}
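/*
 * Illustrative usage sketch (not part of the original source): the
 * example_read_lines() helper below is hypothetical and only shows how the
 * line-oriented routines above are typically combined.  It assumes
 * evbuffer_new()/evbuffer_free() from earlier in this file.  The buffer is
 * filled with evbuffer_add_printf() and drained with evbuffer_readln();
 * the string returned by evbuffer_readln() is malloc()ed and must be freed
 * by the caller.
 */
static void
example_read_lines(void)
{
	struct evbuffer *buf = evbuffer_new();
	char *line;
	size_t n;

	if (buf == NULL)
		return;
	evbuffer_add_printf(buf, "first line\r\nsecond line\r\n");
	while ((line = evbuffer_readln(buf, &n, EVBUFFER_EOL_CRLF)) != NULL) {
		printf("%lu bytes: %s\n", (unsigned long)n, line);
		free(line);
	}
	evbuffer_free(buf);
}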

View file

@ -0,0 +1,266 @@
/* Copied from the Linux version and changed the features according to Android,
 * which is close to Linux */
/* Define if clock_gettime is available in libc */
#define DNS_USE_CPU_CLOCK_FOR_ID 1
/* Define if no secure id variant is available */
/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
/* Define to 1 if you have the `clock_gettime' function. */
#define HAVE_CLOCK_GETTIME 1
/* Define if /dev/poll is available */
/* #undef HAVE_DEVPOLL */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define if your system supports the epoll system calls */
#define HAVE_EPOLL 1
/* Define to 1 if you have the `epoll_ctl' function. */
#define HAVE_EPOLL_CTL 1
/* Define if your system supports event ports */
/* #undef HAVE_EVENT_PORTS */
/* Define to 1 if you have the `fcntl' function. */
#define HAVE_FCNTL 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if the system has the type `fd_mask'. */
/* #undef HAVE_FD_MASK */
/* Define to 1 if you have the `getaddrinfo' function. */
#define HAVE_GETADDRINFO 1
/* Define to 1 if you have the `getegid' function. */
#define HAVE_GETEGID 1
/* Define to 1 if you have the `geteuid' function. */
#define HAVE_GETEUID 1
/* Define to 1 if you have the `getnameinfo' function. */
#define HAVE_GETNAMEINFO 1
/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1
/* Define to 1 if you have the `inet_ntop' function. */
#define HAVE_INET_NTOP 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the `issetugid' function. */
/* #undef HAVE_ISSETUGID */
/* Define to 1 if you have the `kqueue' function. */
/* #undef HAVE_KQUEUE */
/* Define to 1 if you have the `nsl' library (-lnsl). */
#define HAVE_LIBNSL 1
/* Define to 1 if you have the `resolv' library (-lresolv). */
#define HAVE_LIBRESOLV 1
/* Define to 1 if you have the `rt' library (-lrt). */
#define HAVE_LIBRT 1
/* Define to 1 if you have the `socket' library (-lsocket). */
/* #undef HAVE_LIBSOCKET */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the <netinet/in6.h> header file. */
/* #undef HAVE_NETINET_IN6_H */
/* Define to 1 if you have the `poll' function. */
#define HAVE_POLL 1
/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1
/* Define to 1 if you have the `port_create' function. */
/* #undef HAVE_PORT_CREATE */
/* Define to 1 if you have the <port.h> header file. */
/* #undef HAVE_PORT_H */
/* Define to 1 if you have the `select' function. */
#define HAVE_SELECT 1
/* Define if F_SETFD is defined in <fcntl.h> */
#define HAVE_SETFD 1
/* Define to 1 if you have the `sigaction' function. */
#define HAVE_SIGACTION 1
/* Define to 1 if you have the `signal' function. */
#define HAVE_SIGNAL 1
/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1
/* Define to 1 if you have the <stdarg.h> header file. */
#define HAVE_STDARG_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* Define to 1 if you have the `strsep' function. */
#define HAVE_STRSEP 1
/* Define to 1 if you have the `strtok_r' function. */
#define HAVE_STRTOK_R 1
/* Define to 1 if you have the `strtoll' function. */
#define HAVE_STRTOLL 1
/* Define to 1 if the system has the type `struct in6_addr'. */
#define HAVE_STRUCT_IN6_ADDR 1
/* Define to 1 if you have the <sys/devpoll.h> header file. */
/* #undef HAVE_SYS_DEVPOLL_H */
/* Define to 1 if you have the <sys/epoll.h> header file. */
#define HAVE_SYS_EPOLL_H 1
/* Define to 1 if you have the <sys/event.h> header file. */
/* #undef HAVE_SYS_EVENT_H */
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/queue.h> header file. */
#define HAVE_SYS_QUEUE_H 1
/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
#define HAVE_TAILQFOREACH 1
/* Define if timeradd is defined in <sys/time.h> */
#define HAVE_TIMERADD 1
/* Define if timerclear is defined in <sys/time.h> */
#define HAVE_TIMERCLEAR 1
/* Define if timercmp is defined in <sys/time.h> */
#define HAVE_TIMERCMP 1
/* Define if timerisset is defined in <sys/time.h> */
#define HAVE_TIMERISSET 1
/* Define to 1 if the system has the type `uint16_t'. */
#define HAVE_UINT16_T 1
/* Define to 1 if the system has the type `uint32_t'. */
#define HAVE_UINT32_T 1
/* Define to 1 if the system has the type `uint64_t'. */
#define HAVE_UINT64_T 1
/* Define to 1 if the system has the type `uint8_t'. */
#define HAVE_UINT8_T 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the `vasprintf' function. */
#define HAVE_VASPRINTF 1
/* Define if kqueue works correctly with pipes */
/* #undef HAVE_WORKING_KQUEUE */
/* Name of package */
#define PACKAGE "libevent"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME ""
/* Define to the version of this package. */
#define PACKAGE_VERSION ""
/* The size of `int', as computed by sizeof. */
#define SIZEOF_INT 4
/* The size of `long', as computed by sizeof. */
#define SIZEOF_LONG 8
/* The size of `long long', as computed by sizeof. */
#define SIZEOF_LONG_LONG 8
/* The size of `short', as computed by sizeof. */
#define SIZEOF_SHORT 2
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define TIME_WITH_SYS_TIME 1
/* Version number of package */
#define VERSION "1.4.13-stable"
/* Define to an appropriate substitute if the compiler doesn't have __func__ */
/* #undef __func__ */
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to `int' if <sys/types.h> does not define. */
/* #undef pid_t */
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef size_t */
/* Define to unsigned int if you don't have it */
/* #undef socklen_t */

View file

@ -0,0 +1,416 @@
/*
* Copyright 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <sys/types.h>
#include <sys/resource.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <sys/devpoll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <assert.h>
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
/* due to limitations in the devpoll interface, we need to keep track of
 * all file descriptors ourselves.
*/
struct evdevpoll {
struct event *evread;
struct event *evwrite;
};
struct devpollop {
struct evdevpoll *fds;
int nfds;
struct pollfd *events;
int nevents;
int dpfd;
struct pollfd *changes;
int nchanges;
};
static void *devpoll_init (struct event_base *);
static int devpoll_add (void *, struct event *);
static int devpoll_del (void *, struct event *);
static int devpoll_dispatch (struct event_base *, void *, struct timeval *);
static void devpoll_dealloc (struct event_base *, void *);
const struct eventop devpollops = {
"devpoll",
devpoll_init,
devpoll_add,
devpoll_del,
devpoll_dispatch,
devpoll_dealloc,
1 /* need reinit */
};
#define NEVENT 32000
static int
devpoll_commit(struct devpollop *devpollop)
{
/*
* Due to a bug in Solaris, we have to use pwrite with an offset of 0.
	 * Writes are limited to 2GB of data; anything larger will fail.
*/
if (pwrite(devpollop->dpfd, devpollop->changes,
sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
return(-1);
devpollop->nchanges = 0;
return(0);
}
static int
devpoll_queue(struct devpollop *devpollop, int fd, int events) {
struct pollfd *pfd;
if (devpollop->nchanges >= devpollop->nevents) {
/*
* Change buffer is full, must commit it to /dev/poll before
* adding more
*/
if (devpoll_commit(devpollop) != 0)
return(-1);
}
pfd = &devpollop->changes[devpollop->nchanges++];
pfd->fd = fd;
pfd->events = events;
pfd->revents = 0;
return(0);
}
static void *
devpoll_init(struct event_base *base)
{
int dpfd, nfiles = NEVENT;
struct rlimit rl;
struct devpollop *devpollop;
/* Disable devpoll when this environment variable is set */
if (evutil_getenv("EVENT_NODEVPOLL"))
return (NULL);
if (!(devpollop = calloc(1, sizeof(struct devpollop))))
return (NULL);
if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
rl.rlim_cur != RLIM_INFINITY)
nfiles = rl.rlim_cur;
/* Initialize the kernel queue */
if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
event_warn("open: /dev/poll");
free(devpollop);
return (NULL);
}
devpollop->dpfd = dpfd;
/* Initialize fields */
devpollop->events = calloc(nfiles, sizeof(struct pollfd));
if (devpollop->events == NULL) {
free(devpollop);
close(dpfd);
return (NULL);
}
devpollop->nevents = nfiles;
devpollop->fds = calloc(nfiles, sizeof(struct evdevpoll));
if (devpollop->fds == NULL) {
free(devpollop->events);
free(devpollop);
close(dpfd);
return (NULL);
}
devpollop->nfds = nfiles;
devpollop->changes = calloc(nfiles, sizeof(struct pollfd));
if (devpollop->changes == NULL) {
free(devpollop->fds);
free(devpollop->events);
free(devpollop);
close(dpfd);
return (NULL);
}
evsignal_init(base);
return (devpollop);
}
static int
devpoll_recalc(struct event_base *base, void *arg, int max)
{
struct devpollop *devpollop = arg;
if (max >= devpollop->nfds) {
struct evdevpoll *fds;
int nfds;
nfds = devpollop->nfds;
while (nfds <= max)
nfds <<= 1;
fds = realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
if (fds == NULL) {
event_warn("realloc");
return (-1);
}
devpollop->fds = fds;
memset(fds + devpollop->nfds, 0,
(nfds - devpollop->nfds) * sizeof(struct evdevpoll));
devpollop->nfds = nfds;
}
return (0);
}
static int
devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
struct devpollop *devpollop = arg;
struct pollfd *events = devpollop->events;
struct dvpoll dvp;
struct evdevpoll *evdp;
int i, res, timeout = -1;
if (devpollop->nchanges)
devpoll_commit(devpollop);
if (tv != NULL)
timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
dvp.dp_fds = devpollop->events;
dvp.dp_nfds = devpollop->nevents;
dvp.dp_timeout = timeout;
res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
if (res == -1) {
if (errno != EINTR) {
event_warn("ioctl: DP_POLL");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: devpoll_wait reports %d", __func__, res));
for (i = 0; i < res; i++) {
int which = 0;
int what = events[i].revents;
struct event *evread = NULL, *evwrite = NULL;
assert(events[i].fd < devpollop->nfds);
evdp = &devpollop->fds[events[i].fd];
if (what & POLLHUP)
what |= POLLIN | POLLOUT;
else if (what & POLLERR)
what |= POLLIN | POLLOUT;
if (what & POLLIN) {
evread = evdp->evread;
which |= EV_READ;
}
if (what & POLLOUT) {
evwrite = evdp->evwrite;
which |= EV_WRITE;
}
if (!which)
continue;
if (evread != NULL && !(evread->ev_events & EV_PERSIST))
event_del(evread);
if (evwrite != NULL && evwrite != evread &&
!(evwrite->ev_events & EV_PERSIST))
event_del(evwrite);
if (evread != NULL)
event_active(evread, EV_READ, 1);
if (evwrite != NULL)
event_active(evwrite, EV_WRITE, 1);
}
return (0);
}
static int
devpoll_add(void *arg, struct event *ev)
{
struct devpollop *devpollop = arg;
struct evdevpoll *evdp;
int fd, events;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
fd = ev->ev_fd;
if (fd >= devpollop->nfds) {
/* Extend the file descriptor array as necessary */
if (devpoll_recalc(ev->ev_base, devpollop, fd) == -1)
return (-1);
}
evdp = &devpollop->fds[fd];
/*
* It's not necessary to OR the existing read/write events that we
* are currently interested in with the new event we are adding.
* The /dev/poll driver ORs any new events with the existing events
* that it has cached for the fd.
*/
events = 0;
if (ev->ev_events & EV_READ) {
if (evdp->evread && evdp->evread != ev) {
/* There is already a different read event registered */
return(-1);
}
events |= POLLIN;
}
if (ev->ev_events & EV_WRITE) {
if (evdp->evwrite && evdp->evwrite != ev) {
/* There is already a different write event registered */
return(-1);
}
events |= POLLOUT;
}
if (devpoll_queue(devpollop, fd, events) != 0)
return(-1);
/* Update events responsible */
if (ev->ev_events & EV_READ)
evdp->evread = ev;
if (ev->ev_events & EV_WRITE)
evdp->evwrite = ev;
return (0);
}
static int
devpoll_del(void *arg, struct event *ev)
{
struct devpollop *devpollop = arg;
struct evdevpoll *evdp;
int fd, events;
int needwritedelete = 1, needreaddelete = 1;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
fd = ev->ev_fd;
if (fd >= devpollop->nfds)
return (0);
evdp = &devpollop->fds[fd];
events = 0;
if (ev->ev_events & EV_READ)
events |= POLLIN;
if (ev->ev_events & EV_WRITE)
events |= POLLOUT;
/*
* The only way to remove an fd from the /dev/poll monitored set is
* to use POLLREMOVE by itself. This removes ALL events for the fd
* provided so if we care about two events and are only removing one
* we must re-add the other event after POLLREMOVE.
*/
if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
return(-1);
if ((events & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
/*
* We're not deleting all events, so we must resubmit the
* event that we are still interested in if one exists.
*/
if ((events & POLLIN) && evdp->evwrite != NULL) {
/* Deleting read, still care about write */
devpoll_queue(devpollop, fd, POLLOUT);
needwritedelete = 0;
} else if ((events & POLLOUT) && evdp->evread != NULL) {
/* Deleting write, still care about read */
devpoll_queue(devpollop, fd, POLLIN);
needreaddelete = 0;
}
}
if (needreaddelete)
evdp->evread = NULL;
if (needwritedelete)
evdp->evwrite = NULL;
return (0);
}
static void
devpoll_dealloc(struct event_base *base, void *arg)
{
struct devpollop *devpollop = arg;
evsignal_dealloc(base);
if (devpollop->fds)
free(devpollop->fds);
if (devpollop->events)
free(devpollop->events);
if (devpollop->changes)
free(devpollop->changes);
if (devpollop->dpfd >= 0)
close(devpollop->dpfd);
memset(devpollop, 0, sizeof(struct devpollop));
free(devpollop);
}

View file

@ -0,0 +1,376 @@
/*
* Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <stdint.h>
#include <sys/types.h>
#include <sys/resource.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <sys/epoll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
/* due to limitations in the epoll interface, we need to keep track of
 * all file descriptors ourselves.
*/
struct evepoll {
struct event *evread;
struct event *evwrite;
};
struct epollop {
struct evepoll *fds;
int nfds;
struct epoll_event *events;
int nevents;
int epfd;
};
static void *epoll_init (struct event_base *);
static int epoll_add (void *, struct event *);
static int epoll_del (void *, struct event *);
static int epoll_dispatch (struct event_base *, void *, struct timeval *);
static void epoll_dealloc (struct event_base *, void *);
const struct eventop epollops = {
"epoll",
epoll_init,
epoll_add,
epoll_del,
epoll_dispatch,
epoll_dealloc,
1 /* need reinit */
};
#ifdef HAVE_SETFD
#define FD_CLOSEONEXEC(x) do { \
if (fcntl(x, F_SETFD, 1) == -1) \
event_warn("fcntl(%d, F_SETFD)", x); \
} while (0)
#else
#define FD_CLOSEONEXEC(x)
#endif
/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
* values bigger than (LONG_MAX - 999ULL)/HZ. HZ in the wild can be
* as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
* largest number of msec we can support here is 2147482. Let's
* round that down by 47 seconds.
*/
#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)
#define INITIAL_NFILES 32
#define INITIAL_NEVENTS 32
#define MAX_NEVENTS 4096
static void *
epoll_init(struct event_base *base)
{
int epfd;
struct epollop *epollop;
	/* Disable epoll when this environment variable is set */
if (evutil_getenv("EVENT_NOEPOLL"))
return (NULL);
	/* Initialize the kernel queue */
if ((epfd = epoll_create(32000)) == -1) {
if (errno != ENOSYS)
event_warn("epoll_create");
return (NULL);
}
FD_CLOSEONEXEC(epfd);
if (!(epollop = calloc(1, sizeof(struct epollop))))
return (NULL);
epollop->epfd = epfd;
	/* Initialize fields */
epollop->events = malloc(INITIAL_NEVENTS * sizeof(struct epoll_event));
if (epollop->events == NULL) {
free(epollop);
return (NULL);
}
epollop->nevents = INITIAL_NEVENTS;
epollop->fds = calloc(INITIAL_NFILES, sizeof(struct evepoll));
if (epollop->fds == NULL) {
free(epollop->events);
free(epollop);
return (NULL);
}
epollop->nfds = INITIAL_NFILES;
evsignal_init(base);
return (epollop);
}
static int
epoll_recalc(struct event_base *base, void *arg, int max)
{
struct epollop *epollop = arg;
if (max >= epollop->nfds) {
struct evepoll *fds;
int nfds;
nfds = epollop->nfds;
while (nfds <= max)
nfds <<= 1;
fds = realloc(epollop->fds, nfds * sizeof(struct evepoll));
if (fds == NULL) {
event_warn("realloc");
return (-1);
}
epollop->fds = fds;
memset(fds + epollop->nfds, 0,
(nfds - epollop->nfds) * sizeof(struct evepoll));
epollop->nfds = nfds;
}
return (0);
}
static int
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
struct epollop *epollop = arg;
struct epoll_event *events = epollop->events;
struct evepoll *evep;
int i, res, timeout = -1;
if (tv != NULL)
timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
if (timeout > MAX_EPOLL_TIMEOUT_MSEC) {
/* Linux kernels can wait forever if the timeout is too big;
* see comment on MAX_EPOLL_TIMEOUT_MSEC. */
timeout = MAX_EPOLL_TIMEOUT_MSEC;
}
res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
if (res == -1) {
if (errno != EINTR) {
event_warn("epoll_wait");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: epoll_wait reports %d", __func__, res));
for (i = 0; i < res; i++) {
int what = events[i].events;
struct event *evread = NULL, *evwrite = NULL;
int fd = events[i].data.fd;
if (fd < 0 || fd >= epollop->nfds)
continue;
evep = &epollop->fds[fd];
if (what & (EPOLLHUP|EPOLLERR)) {
evread = evep->evread;
evwrite = evep->evwrite;
} else {
if (what & EPOLLIN) {
evread = evep->evread;
}
if (what & EPOLLOUT) {
evwrite = evep->evwrite;
}
}
if (!(evread||evwrite))
continue;
if (evread != NULL)
event_active(evread, EV_READ, 1);
if (evwrite != NULL)
event_active(evwrite, EV_WRITE, 1);
}
if (res == epollop->nevents && epollop->nevents < MAX_NEVENTS) {
/* We used all of the event space this time. We should
be ready for more events next time. */
int new_nevents = epollop->nevents * 2;
struct epoll_event *new_events;
new_events = realloc(epollop->events,
new_nevents * sizeof(struct epoll_event));
if (new_events) {
epollop->events = new_events;
epollop->nevents = new_nevents;
}
}
return (0);
}
static int
epoll_add(void *arg, struct event *ev)
{
struct epollop *epollop = arg;
struct epoll_event epev = {0, {0}};
struct evepoll *evep;
int fd, op, events;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
fd = ev->ev_fd;
if (fd >= epollop->nfds) {
		/* Extend the file descriptor array as necessary */
if (epoll_recalc(ev->ev_base, epollop, fd) == -1)
return (-1);
}
evep = &epollop->fds[fd];
op = EPOLL_CTL_ADD;
events = 0;
if (evep->evread != NULL) {
events |= EPOLLIN;
op = EPOLL_CTL_MOD;
}
if (evep->evwrite != NULL) {
events |= EPOLLOUT;
op = EPOLL_CTL_MOD;
}
if (ev->ev_events & EV_READ)
events |= EPOLLIN;
if (ev->ev_events & EV_WRITE)
events |= EPOLLOUT;
epev.data.fd = fd;
epev.events = events;
if (epoll_ctl(epollop->epfd, op, ev->ev_fd, &epev) == -1)
return (-1);
/* Update events responsible */
if (ev->ev_events & EV_READ)
evep->evread = ev;
if (ev->ev_events & EV_WRITE)
evep->evwrite = ev;
return (0);
}
static int
epoll_del(void *arg, struct event *ev)
{
struct epollop *epollop = arg;
struct epoll_event epev = {0, {0}};
struct evepoll *evep;
int fd, events, op;
int needwritedelete = 1, needreaddelete = 1;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
fd = ev->ev_fd;
if (fd >= epollop->nfds)
return (0);
evep = &epollop->fds[fd];
op = EPOLL_CTL_DEL;
events = 0;
if (ev->ev_events & EV_READ)
events |= EPOLLIN;
if (ev->ev_events & EV_WRITE)
events |= EPOLLOUT;
if ((events & (EPOLLIN|EPOLLOUT)) != (EPOLLIN|EPOLLOUT)) {
if ((events & EPOLLIN) && evep->evwrite != NULL) {
needwritedelete = 0;
events = EPOLLOUT;
op = EPOLL_CTL_MOD;
} else if ((events & EPOLLOUT) && evep->evread != NULL) {
needreaddelete = 0;
events = EPOLLIN;
op = EPOLL_CTL_MOD;
}
}
epev.events = events;
epev.data.fd = fd;
if (needreaddelete)
evep->evread = NULL;
if (needwritedelete)
evep->evwrite = NULL;
if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
return (-1);
return (0);
}
static void
epoll_dealloc(struct event_base *base, void *arg)
{
struct epollop *epollop = arg;
evsignal_dealloc(base);
if (epollop->fds)
free(epollop->fds);
if (epollop->events)
free(epollop->events);
if (epollop->epfd >= 0)
close(epollop->epfd);
memset(epollop, 0, sizeof(struct epollop));
free(epollop);
}

View file

@ -0,0 +1,52 @@
/*
* Copyright 2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <unistd.h>
int
epoll_create(int size)
{
return (syscall(__NR_epoll_create, size));
}
int
epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
}
int
epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
}

View file

@ -0,0 +1,453 @@
/*
* Copyright (c) 2002-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "config.h"
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
#include "evutil.h"
#include "event.h"
/* prototypes */
void bufferevent_read_pressure_cb(struct evbuffer *, size_t, size_t, void *);
static int
bufferevent_add(struct event *ev, int timeout)
{
struct timeval tv, *ptv = NULL;
if (timeout) {
evutil_timerclear(&tv);
tv.tv_sec = timeout;
ptv = &tv;
}
return (event_add(ev, ptv));
}
/*
* This callback is executed when the size of the input buffer changes.
* We use it to apply back pressure on the reading side.
*/
void
bufferevent_read_pressure_cb(struct evbuffer *buf, size_t old, size_t now,
void *arg) {
struct bufferevent *bufev = arg;
/*
* If we are below the watermark then reschedule reading if it's
* still enabled.
*/
if (bufev->wm_read.high == 0 || now < bufev->wm_read.high) {
evbuffer_setcb(buf, NULL, NULL);
if (bufev->enabled & EV_READ)
bufferevent_add(&bufev->ev_read, bufev->timeout_read);
}
}
static void
bufferevent_readcb(int fd, short event, void *arg)
{
struct bufferevent *bufev = arg;
int res = 0;
short what = EVBUFFER_READ;
size_t len;
int howmuch = -1;
if (event == EV_TIMEOUT) {
what |= EVBUFFER_TIMEOUT;
goto error;
}
/*
* If we have a high watermark configured then we don't want to
* read more data than would make us reach the watermark.
*/
if (bufev->wm_read.high != 0) {
howmuch = bufev->wm_read.high - EVBUFFER_LENGTH(bufev->input);
/* we might have lowered the watermark, stop reading */
if (howmuch <= 0) {
struct evbuffer *buf = bufev->input;
event_del(&bufev->ev_read);
evbuffer_setcb(buf,
bufferevent_read_pressure_cb, bufev);
return;
}
}
res = evbuffer_read(bufev->input, fd, howmuch);
if (res == -1) {
if (errno == EAGAIN || errno == EINTR)
goto reschedule;
/* error case */
what |= EVBUFFER_ERROR;
} else if (res == 0) {
/* eof case */
what |= EVBUFFER_EOF;
}
if (res <= 0)
goto error;
bufferevent_add(&bufev->ev_read, bufev->timeout_read);
	/* See if this callback meets the watermarks */
len = EVBUFFER_LENGTH(bufev->input);
if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
return;
if (bufev->wm_read.high != 0 && len >= bufev->wm_read.high) {
struct evbuffer *buf = bufev->input;
event_del(&bufev->ev_read);
/* Now schedule a callback for us when the buffer changes */
evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev);
}
/* Invoke the user callback - must always be called last */
if (bufev->readcb != NULL)
(*bufev->readcb)(bufev, bufev->cbarg);
return;
reschedule:
bufferevent_add(&bufev->ev_read, bufev->timeout_read);
return;
error:
(*bufev->errorcb)(bufev, what, bufev->cbarg);
}
static void
bufferevent_writecb(int fd, short event, void *arg)
{
struct bufferevent *bufev = arg;
int res = 0;
short what = EVBUFFER_WRITE;
if (event == EV_TIMEOUT) {
what |= EVBUFFER_TIMEOUT;
goto error;
}
if (EVBUFFER_LENGTH(bufev->output)) {
res = evbuffer_write(bufev->output, fd);
if (res == -1) {
#ifndef WIN32
			/* TODO: evbuffer uses WriteFile when WIN32 is set.  WIN32
			 * system calls do not set errno, so this error checking
			 * is not portable. */
if (errno == EAGAIN ||
errno == EINTR ||
errno == EINPROGRESS)
goto reschedule;
/* error case */
what |= EVBUFFER_ERROR;
#else
goto reschedule;
#endif
} else if (res == 0) {
/* eof case */
what |= EVBUFFER_EOF;
}
if (res <= 0)
goto error;
}
if (EVBUFFER_LENGTH(bufev->output) != 0)
bufferevent_add(&bufev->ev_write, bufev->timeout_write);
/*
* Invoke the user callback if our buffer is drained or below the
* low watermark.
*/
if (bufev->writecb != NULL &&
EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
(*bufev->writecb)(bufev, bufev->cbarg);
return;
reschedule:
if (EVBUFFER_LENGTH(bufev->output) != 0)
bufferevent_add(&bufev->ev_write, bufev->timeout_write);
return;
error:
(*bufev->errorcb)(bufev, what, bufev->cbarg);
}
/*
* Create a new buffered event object.
*
* The read callback is invoked whenever we read new data.
* The write callback is invoked whenever the output buffer is drained.
* The error callback is invoked on a write/read error or on EOF.
*
 * Both the read and write callbacks may be NULL.  The error callback is not
 * allowed to be NULL and must always be provided.
*/
struct bufferevent *
bufferevent_new(int fd, evbuffercb readcb, evbuffercb writecb,
everrorcb errorcb, void *cbarg)
{
struct bufferevent *bufev;
if ((bufev = calloc(1, sizeof(struct bufferevent))) == NULL)
return (NULL);
if ((bufev->input = evbuffer_new()) == NULL) {
free(bufev);
return (NULL);
}
if ((bufev->output = evbuffer_new()) == NULL) {
evbuffer_free(bufev->input);
free(bufev);
return (NULL);
}
event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
bufferevent_setcb(bufev, readcb, writecb, errorcb, cbarg);
/*
* Set to EV_WRITE so that using bufferevent_write is going to
* trigger a callback. Reading needs to be explicitly enabled
* because otherwise no data will be available.
*/
bufev->enabled = EV_WRITE;
return (bufev);
}
void
bufferevent_setcb(struct bufferevent *bufev,
evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg)
{
bufev->readcb = readcb;
bufev->writecb = writecb;
bufev->errorcb = errorcb;
bufev->cbarg = cbarg;
}
void
bufferevent_setfd(struct bufferevent *bufev, int fd)
{
event_del(&bufev->ev_read);
event_del(&bufev->ev_write);
event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
if (bufev->ev_base != NULL) {
event_base_set(bufev->ev_base, &bufev->ev_read);
event_base_set(bufev->ev_base, &bufev->ev_write);
}
/* might have to manually trigger event registration */
}
int
bufferevent_priority_set(struct bufferevent *bufev, int priority)
{
if (event_priority_set(&bufev->ev_read, priority) == -1)
return (-1);
if (event_priority_set(&bufev->ev_write, priority) == -1)
return (-1);
return (0);
}
/* Closing the file descriptor is the responsibility of the caller */
void
bufferevent_free(struct bufferevent *bufev)
{
event_del(&bufev->ev_read);
event_del(&bufev->ev_write);
evbuffer_free(bufev->input);
evbuffer_free(bufev->output);
free(bufev);
}
/*
* Returns 0 on success;
* -1 on failure.
*/
int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
int res;
res = evbuffer_add(bufev->output, data, size);
if (res == -1)
return (res);
/* If everything is okay, we need to schedule a write */
if (size > 0 && (bufev->enabled & EV_WRITE))
bufferevent_add(&bufev->ev_write, bufev->timeout_write);
return (res);
}
int
bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
int res;
res = bufferevent_write(bufev, buf->buffer, buf->off);
if (res != -1)
evbuffer_drain(buf, buf->off);
return (res);
}
size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
struct evbuffer *buf = bufev->input;
if (buf->off < size)
size = buf->off;
/* Copy the available data to the user buffer */
memcpy(data, buf->buffer, size);
if (size)
evbuffer_drain(buf, size);
return (size);
}
int
bufferevent_enable(struct bufferevent *bufev, short event)
{
if (event & EV_READ) {
if (bufferevent_add(&bufev->ev_read, bufev->timeout_read) == -1)
return (-1);
}
if (event & EV_WRITE) {
if (bufferevent_add(&bufev->ev_write, bufev->timeout_write) == -1)
return (-1);
}
bufev->enabled |= event;
return (0);
}
int
bufferevent_disable(struct bufferevent *bufev, short event)
{
if (event & EV_READ) {
if (event_del(&bufev->ev_read) == -1)
return (-1);
}
if (event & EV_WRITE) {
if (event_del(&bufev->ev_write) == -1)
return (-1);
}
bufev->enabled &= ~event;
return (0);
}
/*
* Sets the read and write timeout for a buffered event.
*/
void
bufferevent_settimeout(struct bufferevent *bufev,
int timeout_read, int timeout_write) {
bufev->timeout_read = timeout_read;
bufev->timeout_write = timeout_write;
if (event_pending(&bufev->ev_read, EV_READ, NULL))
bufferevent_add(&bufev->ev_read, timeout_read);
if (event_pending(&bufev->ev_write, EV_WRITE, NULL))
bufferevent_add(&bufev->ev_write, timeout_write);
}
/*
* Sets the water marks
*/
void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
size_t lowmark, size_t highmark)
{
if (events & EV_READ) {
bufev->wm_read.low = lowmark;
bufev->wm_read.high = highmark;
}
if (events & EV_WRITE) {
bufev->wm_write.low = lowmark;
bufev->wm_write.high = highmark;
}
/* If the watermarks changed then see if we should call read again */
bufferevent_read_pressure_cb(bufev->input,
0, EVBUFFER_LENGTH(bufev->input), bufev);
}
int
bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
{
int res;
bufev->ev_base = base;
res = event_base_set(base, &bufev->ev_read);
if (res == -1)
return (res);
res = event_base_set(base, &bufev->ev_write);
return (res);
}
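/*
 * Illustrative usage sketch (not part of the original source): the
 * example_* functions below are hypothetical and only show how a buffered
 * event built from the routines above is typically wired up.  They assume
 * an already-connected socket descriptor and that event_init() has been
 * called; a real program would then enter event_dispatch().
 */
static void
example_readcb(struct bufferevent *bev, void *arg)
{
	char chunk[256];
	size_t n;

	/* Drain whatever has accumulated in the input buffer. */
	while ((n = bufferevent_read(bev, chunk, sizeof(chunk))) > 0)
		fwrite(chunk, 1, n, stdout);
}

static void
example_errorcb(struct bufferevent *bev, short what, void *arg)
{
	/* EOF, error or timeout: tear the buffered event down. */
	bufferevent_free(bev);
}

static void
example_setup(int fd)
{
	struct bufferevent *bev;

	bev = bufferevent_new(fd, example_readcb, NULL, example_errorcb, NULL);
	if (bev == NULL)
		return;
	bufferevent_settimeout(bev, 30, 30);	/* read/write timeouts in seconds */
	bufferevent_enable(bev, EV_READ);	/* reading must be enabled explicitly */
	bufferevent_write(bev, "hello\r\n", 7);	/* writing is enabled by default */
}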

View file

@ -0,0 +1,322 @@
.\"
.\" Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\"
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 3. The name of the author may not be used to endorse or promote products
.\" derived from this software without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd October 7, 2006
.Dt EVDNS 3
.Os
.Sh NAME
.Nm evdns_init
.Nm evdns_shutdown
.Nm evdns_err_to_string
.Nm evdns_nameserver_add
.Nm evdns_count_nameservers
.Nm evdns_clear_nameservers_and_suspend
.Nm evdns_resume
.Nm evdns_nameserver_ip_add
.Nm evdns_resolve_ipv4
.Nm evdns_resolve_reverse
.Nm evdns_resolv_conf_parse
.Nm evdns_config_windows_nameservers
.Nm evdns_search_clear
.Nm evdns_search_add
.Nm evdns_search_ndots_set
.Nm evdns_set_log_fn
.Nd asynchronous functions for DNS resolution.
.Sh SYNOPSIS
.Fd #include <sys/time.h>
.Fd #include <event.h>
.Fd #include <evdns.h>
.Ft int
.Fn evdns_init
.Ft void
.Fn evdns_shutdown "int fail_requests"
.Ft "const char *"
.Fn evdns_err_to_string "int err"
.Ft int
.Fn evdns_nameserver_add "unsigned long int address"
.Ft int
.Fn evdns_count_nameservers
.Ft int
.Fn evdns_clear_nameservers_and_suspend
.Ft int
.Fn evdns_resume
.Ft int
.Fn evdns_nameserver_ip_add "const char *ip_as_string"
.Ft int
.Fn evdns_resolve_ipv4 "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
.Ft int
.Fn evdns_resolve_reverse "struct in_addr *in" "int flags" "evdns_callback_type callback" "void *ptr"
.Ft int
.Fn evdns_resolv_conf_parse "int flags" "const char *"
.Ft void
.Fn evdns_search_clear
.Ft void
.Fn evdns_search_add "const char *domain"
.Ft void
.Fn evdns_search_ndots_set "const int ndots"
.Ft void
.Fn evdns_set_log_fn "evdns_debug_log_fn_type fn"
.Ft int
.Fn evdns_config_windows_nameservers
.Sh DESCRIPTION
Welcome, gentle reader
.Pp
Async DNS lookups are really a whole lot harder than they should be,
mostly stemming from the fact that the libc resolver has never been
very good at them. Before you use this library you should see if libc
can do the job for you with the modern async call getaddrinfo_a
(see http://www.imperialviolet.org/page25.html#e498). Otherwise,
please continue.
.Pp
This code is based on libevent and you must call event_init before
any of the APIs in this file. You must also seed the OpenSSL random
source if you are using OpenSSL for ids (see below).
.Pp
This library is designed to be included and shipped with your source
code. You statically link with it. You should also test for the
existence of strtok_r and define HAVE_STRTOK_R if you have it.
.Pp
The DNS protocol requires a good source of id numbers and these
numbers should be unpredictable for spoofing reasons. There are
three methods for generating them here and you must define exactly
one of them. In increasing order of preference:
.Pp
.Bl -tag -width "DNS_USE_GETTIMEOFDAY_FOR_ID" -compact -offset indent
.It DNS_USE_GETTIMEOFDAY_FOR_ID
Using the bottom 16 bits of the usec result from gettimeofday. This
is a pretty poor solution but should work anywhere.
.It DNS_USE_CPU_CLOCK_FOR_ID
Using the bottom 16 bits of the nsec result from the CPU's time
counter. This is better, but may not work everywhere. Requires
POSIX realtime support and you'll need to link against -lrt on
glibc systems at least.
.It DNS_USE_OPENSSL_FOR_ID
Uses the OpenSSL RAND_bytes call to generate the data. You must
have seeded the pool before making any calls to this library.
.El
.Pp
The library keeps track of the state of nameservers and will avoid
them when they go down. Otherwise it will round robin between them.
.Pp
Quick start guide:
#include "evdns.h"
void callback(int result, char type, int count, int ttl,
void *addresses, void *arg);
evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
evdns_resolve("www.hostname.com", 0, callback, NULL);
.Pp
When the lookup is complete the callback function is called. The
first argument will be one of the DNS_ERR_* defines in evdns.h.
Hopefully it will be DNS_ERR_NONE, in which case type will be
DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
which the data can be cached for (in seconds), addresses will point
to an array of uint32_t's and arg will be whatever you passed to
evdns_resolve.
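.Pp
For example, a callback that just prints the returned IPv4 addresses could
look like the following sketch (illustrative only; it assumes the usual
<stdio.h> and <arpa/inet.h> includes):
.Pp
void callback(int result, char type, int count, int ttl,
void *addresses, void *arg) {
int i;
if (result == DNS_ERR_NONE && type == DNS_IPv4_A)
for (i = 0; i < count; ++i) {
struct in_addr in;
in.s_addr = ((uint32_t *)addresses)[i];
puts(inet_ntoa(in));
}
}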
.Pp
Searching:
.Pp
In order for this library to be a good replacement for glibc's resolver it
supports searching. This involves setting a list of default domains, in
which names will be queried for. The number of dots in the query name
determines the order in which this list is used.
.Pp
Searching appears to be a single lookup from the point of view of the API,
although many DNS queries may be generated from a single call to
evdns_resolve. Searching can also drastically slow down the resolution
of names.
.Pp
To disable searching:
.Bl -enum -compact -offset indent
.It
Never set it up. If you never call
.Fn evdns_resolv_conf_parse,
.Fn evdns_init,
or
.Fn evdns_search_add
then no searching will occur.
.It
If you do call
.Fn evdns_resolv_conf_parse
then don't pass
.Va DNS_OPTION_SEARCH
(or
.Va DNS_OPTIONS_ALL,
which implies it).
.It
When calling
.Fn evdns_resolve,
pass the
.Va DNS_QUERY_NO_SEARCH
flag.
.El
.Pp
The order of searches depends on the number of dots in the name. If the
number is greater than the ndots setting then the name is first tried
globally. Otherwise each search domain is appended in turn.
.Pp
The ndots setting can either be set from a resolv.conf, or by calling
evdns_search_ndots_set.
.Pp
For example, with ndots set to 1 (the default) and a search domain list of
["myhome.net"]:
Query: www
Order: www.myhome.net, www.
.Pp
Query: www.abc
Order: www.abc., www.abc.myhome.net
.Pp
.Sh API reference
.Pp
.Bl -tag -width 0123456
.It Ft int Fn evdns_init
Initializes support for non-blocking name resolution by calling
.Fn evdns_resolv_conf_parse
on UNIX and
.Fn evdns_config_windows_nameservers
on Windows.
.It Ft int Fn evdns_nameserver_add "unsigned long int address"
Add a nameserver. The address should be an IP address in
network byte order. The type of address is chosen so that
it matches in_addr.s_addr.
Returns non-zero on error.
.It Ft int Fn evdns_nameserver_ip_add "const char *ip_as_string"
This wraps the above function by parsing a string as an IP
address and adds it as a nameserver.
Returns non-zero on error
.It Ft int Fn evdns_resolve "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
Resolve a name. The name parameter should be a DNS name.
The flags parameter should be 0, or DNS_QUERY_NO_SEARCH
which disables searching for this query. (see defn of
searching above).
.Pp
The callback argument is a function which is called when
this query completes and ptr is an argument which is passed
to that callback function.
.Pp
Returns non-zero on error
.It Ft void Fn evdns_search_clear
Clears the list of search domains
.It Ft void Fn evdns_search_add "const char *domain"
Add a domain to the list of search domains
.It Ft void Fn evdns_search_ndots_set "int ndots"
Set the number of dots which, when found in a name, causes
the first query to be without any search domain.
.It Ft int Fn evdns_count_nameservers "void"
Return the number of configured nameservers (not necessarily the
number of running nameservers). This is useful for double-checking
whether our calls to the various nameserver configuration functions
have been successful.
.It Ft int Fn evdns_clear_nameservers_and_suspend "void"
Remove all currently configured nameservers, and suspend all pending
resolves. Resolves will not necessarily be re-attempted until
evdns_resume() is called.
.It Ft int Fn evdns_resume "void"
Re-attempt resolves left in limbo after an earlier call to
evdns_clear_nameservers_and_suspend().
.It Ft int Fn evdns_config_windows_nameservers "void"
Attempt to configure a set of nameservers based on platform settings on
a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
looks in the registry. Returns 0 on success, nonzero on failure.
.It Ft int Fn evdns_resolv_conf_parse "int flags" "const char *filename"
Parse a resolv.conf like file from the given filename.
.Pp
See the man page for resolv.conf for the format of this file.
The flags argument determines what information is parsed from
this file:
.Bl -tag -width "DNS_OPTION_NAMESERVERS" -offset indent -compact -nested
.It DNS_OPTION_SEARCH
domain, search and ndots options
.It DNS_OPTION_NAMESERVERS
nameserver lines
.It DNS_OPTION_MISC
timeout and attempts options
.It DNS_OPTIONS_ALL
all of the above
.El
.Pp
The following directives are not parsed from the file:
sortlist, rotate, no-check-names, inet6, debug
.Pp
Returns non-zero on error:
.Bl -tag -width "0" -offset indent -compact -nested
.It 0
no errors
.It 1
failed to open file
.It 2
failed to stat file
.It 3
file too large
.It 4
out of memory
.It 5
short read from file
.El
.El
.Sh Internals:
Requests are kept in two queues. The first is the inflight queue. In
this queue requests have an allocated transaction id and nameserver.
They will soon be transmitted if they haven't already been.
.Pp
The second is the waiting queue. The size of the inflight ring is
limited and all other requests wait in waiting queue for space. This
bounds the number of concurrent requests so that we don't flood the
nameserver. Several algorithms require a full walk of the inflight
queue and so bounding its size keeps things going nicely under huge
(many thousands of requests) loads.
.Pp
If a nameserver loses too many requests it is considered down and we
try not to use it. After a while we send a probe to that nameserver
(a lookup for google.com) and, if it replies, we consider it working
again. If the nameserver fails a probe we wait longer to try again
with the next probe.
.Sh SEE ALSO
.Xr event 3 ,
.Xr gethostbyname 3 ,
.Xr resolv.conf 5
.Sh HISTORY
The
.Nm evdns
API was developed by Adam Langley on top of the
.Nm libevent
API.
The code was integrated into
.Nm Tor
by Nick Mathewson and finally put into
.Nm libevent
itself by Niels Provos.
.Sh AUTHORS
The
.Nm evdns
API and code was written by Adam Langley with significant
contributions by Nick Mathewson.
.Sh BUGS
This documentation is neither complete nor authoritative.
If you are in doubt about the usage of this API then
check the source code to find out how it works, write
up the missing piece of documentation and send it to
me for inclusion in this man page.

File diff suppressed because it is too large

View file

@ -0,0 +1,528 @@
/*
* Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* The original DNS code is due to Adam Langley with heavy
* modifications by Nick Mathewson. Adam put his DNS software in the
* public domain. You can find his original copyright below. Please,
* aware that the code as part of libevent is governed by the 3-clause
* BSD license above.
*
* This software is Public Domain. To view a copy of the public domain dedication,
* visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
* Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
*
* I ask and expect, but do not require, that all derivative works contain an
* attribution similar to:
* Parts developed by Adam Langley <agl@imperialviolet.org>
*
* You may wish to replace the word "Parts" with something else depending on
* the amount of original code.
*
* (Derivative works does not include programs which link against, run or include
* the source verbatim in their source distributions)
*/
/** @file evdns.h
*
* Welcome, gentle reader
*
* Async DNS lookups are really a whole lot harder than they should be,
* mostly stemming from the fact that the libc resolver has never been
* very good at them. Before you use this library you should see if libc
* can do the job for you with the modern async call getaddrinfo_a
* (see http://www.imperialviolet.org/page25.html#e498). Otherwise,
* please continue.
*
* This code is based on libevent and you must call event_init before
* any of the APIs in this file. You must also seed the OpenSSL random
* source if you are using OpenSSL for ids (see below).
*
* This library is designed to be included and shipped with your source
* code. You statically link with it. You should also test for the
* existence of strtok_r and define HAVE_STRTOK_R if you have it.
*
* The DNS protocol requires a good source of id numbers and these
* numbers should be unpredictable for spoofing reasons. There are
* three methods for generating them here and you must define exactly
* one of them. In increasing order of preference:
*
* DNS_USE_GETTIMEOFDAY_FOR_ID:
* Using the bottom 16 bits of the usec result from gettimeofday. This
* is a pretty poor solution but should work anywhere.
* DNS_USE_CPU_CLOCK_FOR_ID:
* Using the bottom 16 bits of the nsec result from the CPU's time
* counter. This is better, but may not work everywhere. Requires
* POSIX realtime support and you'll need to link against -lrt on
* glibc systems at least.
* DNS_USE_OPENSSL_FOR_ID:
* Uses the OpenSSL RAND_bytes call to generate the data. You must
* have seeded the pool before making any calls to this library.
*
* The library keeps track of the state of nameservers and will avoid
* them when they go down. Otherwise it will round robin between them.
*
* Quick start guide:
* #include "evdns.h"
* void callback(int result, char type, int count, int ttl,
* void *addresses, void *arg);
* evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
* evdns_resolve("www.hostname.com", 0, callback, NULL);
*
* When the lookup is complete the callback function is called. The
* first argument will be one of the DNS_ERR_* defines in evdns.h.
* Hopefully it will be DNS_ERR_NONE, in which case type will be
* DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
* which the data can be cached for (in seconds), addresses will point
* to an array of uint32_t's and arg will be whatever you passed to
* evdns_resolve.
*
* Searching:
*
* In order for this library to be a good replacement for glibc's resolver it
* supports searching. This involves setting a list of default domains, in
* which names will be queried for. The number of dots in the query name
* determines the order in which this list is used.
*
* Searching appears to be a single lookup from the point of view of the API,
* although many DNS queries may be generated from a single call to
* evdns_resolve. Searching can also drastically slow down the resolution
* of names.
*
* To disable searching:
* 1. Never set it up. If you never call evdns_resolv_conf_parse or
* evdns_search_add then no searching will occur.
*
* 2. If you do call evdns_resolv_conf_parse then don't pass
* DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it).
*
* 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag.
*
* The order of searches depends on the number of dots in the name. If the
 * number is greater than the ndots setting then the name is first tried
* globally. Otherwise each search domain is appended in turn.
*
* The ndots setting can either be set from a resolv.conf, or by calling
* evdns_search_ndots_set.
*
* For example, with ndots set to 1 (the default) and a search domain list of
* ["myhome.net"]:
* Query: www
* Order: www.myhome.net, www.
*
* Query: www.abc
* Order: www.abc., www.abc.myhome.net
*
* Internals:
*
* Requests are kept in two queues. The first is the inflight queue. In
* this queue requests have an allocated transaction id and nameserver.
* They will soon be transmitted if they haven't already been.
*
* The second is the waiting queue. The size of the inflight ring is
* limited and all other requests wait in waiting queue for space. This
* bounds the number of concurrent requests so that we don't flood the
* nameserver. Several algorithms require a full walk of the inflight
 * queue and so bounding its size keeps things going nicely under huge
* (many thousands of requests) loads.
*
* If a nameserver loses too many requests it is considered down and we
* try not to use it. After a while we send a probe to that nameserver
* (a lookup for google.com) and, if it replies, we consider it working
* again. If the nameserver fails a probe we wait longer to try again
* with the next probe.
*/
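/*
 * Expanding on the quick start above, the following sketch resolves a name
 * using only the functions declared in this header plus event_init() and
 * event_dispatch() from event.h.  It is illustrative only: the hostname is a
 * placeholder and all error checking is omitted.
 *
 *   #include <event.h>
 *   #include "evdns.h"
 *
 *   static void resolve_cb(int result, char type, int count, int ttl,
 *       void *addresses, void *arg) {
 *     if (result == DNS_ERR_NONE && type == DNS_IPv4_A && count > 0) {
 *       // 'addresses' points to 'count' uint32_t's in network byte order;
 *       // 'ttl' says how long (in seconds) the answer may be cached.
 *     }
 *   }
 *
 *   int main(void) {
 *     event_init();
 *     evdns_init();
 *     evdns_resolve_ipv4("www.example.com", 0, resolve_cb, NULL);
 *     return event_dispatch();
 *   }
 */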
#ifndef EVENTDNS_H
#define EVENTDNS_H
#ifdef __cplusplus
extern "C" {
#endif
/* For integer types. */
#include "evutil.h"
/** Error codes 0-5 are as described in RFC 1035. */
#define DNS_ERR_NONE 0
/** The name server was unable to interpret the query */
#define DNS_ERR_FORMAT 1
/** The name server was unable to process this query due to a problem with the
* name server */
#define DNS_ERR_SERVERFAILED 2
/** The domain name does not exist */
#define DNS_ERR_NOTEXIST 3
/** The name server does not support the requested kind of query */
#define DNS_ERR_NOTIMPL 4
/** The name server refuses to perform the specified operation for policy
* reasons */
#define DNS_ERR_REFUSED 5
/** The reply was truncated or ill-formatted */
#define DNS_ERR_TRUNCATED 65
/** An unknown error occurred */
#define DNS_ERR_UNKNOWN 66
/** Communication with the server timed out */
#define DNS_ERR_TIMEOUT 67
/** The request was canceled because the DNS subsystem was shut down. */
#define DNS_ERR_SHUTDOWN 68
#define DNS_IPv4_A 1
#define DNS_PTR 2
#define DNS_IPv6_AAAA 3
#define DNS_QUERY_NO_SEARCH 1
#define DNS_OPTION_SEARCH 1
#define DNS_OPTION_NAMESERVERS 2
#define DNS_OPTION_MISC 4
#define DNS_OPTIONS_ALL 7
/**
* The callback that contains the results from a lookup.
* - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA
* - count contains the number of addresses of form type
* - ttl is the number of seconds the resolution may be cached for.
* - addresses needs to be cast according to type
*/
typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg);
/**
Initialize the asynchronous DNS library.
This function initializes support for non-blocking name resolution by
calling evdns_resolv_conf_parse() on UNIX and
evdns_config_windows_nameservers() on Windows.
@return 0 if successful, or -1 if an error occurred
@see evdns_shutdown()
*/
int evdns_init(void);
/**
Shut down the asynchronous DNS resolver and terminate all active requests.
If the 'fail_requests' option is enabled, all active requests will return
an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
the requests will be silently discarded.
@param fail_requests if zero, active requests will be aborted; if non-zero,
active requests will return DNS_ERR_SHUTDOWN.
@see evdns_init()
*/
void evdns_shutdown(int fail_requests);
/**
Convert a DNS error code to a string.
@param err the DNS error code
@return a string containing an explanation of the error code
*/
const char *evdns_err_to_string(int err);
/**
Add a nameserver.
The address should be an IPv4 address in network byte order.
The type of address is chosen so that it matches in_addr.s_addr.
@param address an IP address in network byte order
@return 0 if successful, or -1 if an error occurred
@see evdns_nameserver_ip_add()
*/
int evdns_nameserver_add(unsigned long int address);
/**
Get the number of configured nameservers.
This returns the number of configured nameservers (not necessarily the
number of running nameservers). This is useful for double-checking
whether our calls to the various nameserver configuration functions
have been successful.
@return the number of configured nameservers
@see evdns_nameserver_add()
*/
int evdns_count_nameservers(void);
/**
Remove all configured nameservers, and suspend all pending resolves.
Resolves will not necessarily be re-attempted until evdns_resume() is called.
@return 0 if successful, or -1 if an error occurred
@see evdns_resume()
*/
int evdns_clear_nameservers_and_suspend(void);
/**
Resume normal operation and continue any suspended resolve requests.
Re-attempt resolves left in limbo after an earlier call to
evdns_clear_nameservers_and_suspend().
@return 0 if successful, or -1 if an error occurred
@see evdns_clear_nameservers_and_suspend()
*/
int evdns_resume(void);
/**
Add a nameserver.
This wraps the evdns_nameserver_add() function by parsing a string as an IP
  address and adding it as a nameserver.
@return 0 if successful, or -1 if an error occurred
@see evdns_nameserver_add()
*/
int evdns_nameserver_ip_add(const char *ip_as_string);
/**
Lookup an A record for a given name.
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup an AAAA record for a given name.
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr);
struct in_addr;
struct in6_addr;
/**
Lookup a PTR record for a given IP address.
@param in an IPv4 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup a PTR record for a given IPv6 address.
@param in an IPv6 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Set the value of a configuration option.
The currently available configuration options are:
ndots, timeout, max-timeouts, max-inflight, and attempts
@param option the name of the configuration option to be modified
@param val the value to be set
  @param flags either 0, DNS_OPTION_SEARCH, or DNS_OPTION_MISC
@return 0 if successful, or -1 if an error occurred
*/
int evdns_set_option(const char *option, const char *val, int flags);
/**
Parse a resolv.conf file.
The 'flags' parameter determines what information is parsed from the
resolv.conf file. See the man page for resolv.conf for the format of this
file.
The following directives are not parsed from the file: sortlist, rotate,
no-check-names, inet6, debug.
If this function encounters an error, the possible return values are: 1 =
failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
memory, 5 = short read from file, 6 = no nameservers listed in the file
@param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
DNS_OPTIONS_ALL
@param filename the path to the resolv.conf file
@return 0 if successful, or various positive error codes if an error
occurred (see above)
@see resolv.conf(3), evdns_config_windows_nameservers()
*/
int evdns_resolv_conf_parse(int flags, const char *const filename);
/**
Obtain nameserver information using the Windows API.
Attempt to configure a set of nameservers based on platform settings on
a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
looks in the registry.
@return 0 if successful, or -1 if an error occurred
@see evdns_resolv_conf_parse()
*/
#ifdef WIN32
int evdns_config_windows_nameservers(void);
#endif
/**
Clear the list of search domains.
*/
void evdns_search_clear(void);
/**
Add a domain to the list of search domains
@param domain the domain to be added to the search list
*/
void evdns_search_add(const char *domain);
/**
Set the 'ndots' parameter for searches.
Sets the number of dots which, when found in a name, causes
the first query to be without any search domain.
@param ndots the new ndots parameter
*/
void evdns_search_ndots_set(const int ndots);
/**
A callback that is invoked when a log message is generated
@param is_warning indicates if the log message is a 'warning'
@param msg the content of the log message
*/
typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg);
/**
Set the callback function to handle log messages.
@param fn the callback to be invoked when a log message is generated
*/
void evdns_set_log_fn(evdns_debug_log_fn_type fn);
/**
Set a callback that will be invoked to generate transaction IDs. By
default, we pick transaction IDs based on the current clock time.
@param fn the new callback, or NULL to use the default.
*/
void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void));
#define DNS_NO_SEARCH 1
/*
* Structures and functions used to implement a DNS server.
*/
struct evdns_server_request {
int flags;
int nquestions;
struct evdns_server_question **questions;
};
struct evdns_server_question {
int type;
#ifdef __cplusplus
int dns_question_class;
#else
/* You should refer to this field as "dns_question_class". The
* name "class" works in C for backward compatibility, and will be
* removed in a future version. (1.5 or later). */
int class;
#define dns_question_class class
#endif
char name[1];
};
typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *);
#define EVDNS_ANSWER_SECTION 0
#define EVDNS_AUTHORITY_SECTION 1
#define EVDNS_ADDITIONAL_SECTION 2
#define EVDNS_TYPE_A 1
#define EVDNS_TYPE_NS 2
#define EVDNS_TYPE_CNAME 5
#define EVDNS_TYPE_SOA 6
#define EVDNS_TYPE_PTR 12
#define EVDNS_TYPE_MX 15
#define EVDNS_TYPE_TXT 16
#define EVDNS_TYPE_AAAA 28
#define EVDNS_QTYPE_AXFR 252
#define EVDNS_QTYPE_ALL 255
#define EVDNS_CLASS_INET 1
struct evdns_server_port *evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type callback, void *user_data);
void evdns_close_server_port(struct evdns_server_port *port);
int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data);
int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl);
int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl);
int evdns_server_request_respond(struct evdns_server_request *req, int err);
int evdns_server_request_drop(struct evdns_server_request *req);
struct sockaddr;
int evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len);
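/*
 * Rough sketch of the server-side API declared above.  The UDP socket 'fd'
 * is assumed to be created, bound and made non-blocking elsewhere, and the
 * answer 10.0.0.1 is a placeholder.
 *
 *   static void server_cb(struct evdns_server_request *req, void *data) {
 *     int i;
 *     for (i = 0; i < req->nquestions; ++i) {
 *       struct evdns_server_question *q = req->questions[i];
 *       if (q->type == EVDNS_TYPE_A &&
 *           q->dns_question_class == EVDNS_CLASS_INET) {
 *         ev_uint32_t addr = htonl(0x0a000001);  // 10.0.0.1
 *         evdns_server_request_add_a_reply(req, q->name, 1, &addr, 300);
 *       }
 *     }
 *     evdns_server_request_respond(req, 0);
 *   }
 *
 *   // after setting up fd:
 *   //   evdns_add_server_port(fd, 0, server_cb, NULL);
 */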
#ifdef __cplusplus
}
#endif
#endif /* !EVENTDNS_H */

View file

@ -0,0 +1,281 @@
/* Copied from the Linux version and changed the features according to Android, which
* is close to Linux */
#ifndef _EVENT_CONFIG_H_
#define _EVENT_CONFIG_H_
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
/* Define if clock_gettime is available in libc */
#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
/* Define if no secure id variant is available */
/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
/* Define to 1 if you have the `clock_gettime' function. */
#define _EVENT_HAVE_CLOCK_GETTIME 1
/* Define if /dev/poll is available */
/* #undef _EVENT_HAVE_DEVPOLL */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define _EVENT_HAVE_DLFCN_H 1
/* Define if your system supports the epoll system calls */
#define _EVENT_HAVE_EPOLL 1
/* Define to 1 if you have the `epoll_ctl' function. */
#define _EVENT_HAVE_EPOLL_CTL 1
/* Define if your system supports event ports */
/* #undef _EVENT_HAVE_EVENT_PORTS */
/* Define to 1 if you have the `fcntl' function. */
#define _EVENT_HAVE_FCNTL 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define _EVENT_HAVE_FCNTL_H 1
/* Define to 1 if the system has the type `fd_mask'. */
/* #undef _EVENT_HAVE_FD_MASK 1 */
/* Define to 1 if you have the `getaddrinfo' function. */
#define _EVENT_HAVE_GETADDRINFO 1
/* Define to 1 if you have the `getegid' function. */
#define _EVENT_HAVE_GETEGID 1
/* Define to 1 if you have the `geteuid' function. */
#define _EVENT_HAVE_GETEUID 1
/* Define to 1 if you have the `getnameinfo' function. */
#define _EVENT_HAVE_GETNAMEINFO 1
/* Define to 1 if you have the `gettimeofday' function. */
#define _EVENT_HAVE_GETTIMEOFDAY 1
/* Define to 1 if you have the `inet_ntop' function. */
#define _EVENT_HAVE_INET_NTOP 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define _EVENT_HAVE_INTTYPES_H 1
/* Define to 1 if you have the `issetugid' function. */
/* #undef _EVENT_HAVE_ISSETUGID */
/* Define to 1 if you have the `kqueue' function. */
/* #undef _EVENT_HAVE_KQUEUE */
/* Define to 1 if you have the `nsl' library (-lnsl). */
#define _EVENT_HAVE_LIBNSL 1
/* Define to 1 if you have the `resolv' library (-lresolv). */
#define _EVENT_HAVE_LIBRESOLV 1
/* Define to 1 if you have the `rt' library (-lrt). */
#define _EVENT_HAVE_LIBRT 1
/* Define to 1 if you have the `socket' library (-lsocket). */
/* #undef _EVENT_HAVE_LIBSOCKET */
/* Define to 1 if you have the <memory.h> header file. */
#define _EVENT_HAVE_MEMORY_H 1
/* Define to 1 if you have the <netinet/in6.h> header file. */
/* #undef _EVENT_HAVE_NETINET_IN6_H */
/* Define to 1 if you have the `poll' function. */
#define _EVENT_HAVE_POLL 1
/* Define to 1 if you have the <poll.h> header file. */
#define _EVENT_HAVE_POLL_H 1
/* Define to 1 if you have the `port_create' function. */
/* #undef _EVENT_HAVE_PORT_CREATE */
/* Define to 1 if you have the <port.h> header file. */
/* #undef _EVENT_HAVE_PORT_H */
/* Define to 1 if you have the `select' function. */
#define _EVENT_HAVE_SELECT 1
/* Define if F_SETFD is defined in <fcntl.h> */
#define _EVENT_HAVE_SETFD 1
/* Define to 1 if you have the `sigaction' function. */
#define _EVENT_HAVE_SIGACTION 1
/* Define to 1 if you have the `signal' function. */
#define _EVENT_HAVE_SIGNAL 1
/* Define to 1 if you have the <signal.h> header file. */
#define _EVENT_HAVE_SIGNAL_H 1
/* Define to 1 if you have the <stdarg.h> header file. */
#define _EVENT_HAVE_STDARG_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define _EVENT_HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define _EVENT_HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define _EVENT_HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define _EVENT_HAVE_STRING_H 1
/* Define to 1 if you have the `strlcpy' function. */
#define _EVENT_HAVE_STRLCPY 1
/* Define to 1 if you have the `strsep' function. */
#define _EVENT_HAVE_STRSEP 1
/* Define to 1 if you have the `strtok_r' function. */
#define _EVENT_HAVE_STRTOK_R 1
/* Define to 1 if you have the `strtoll' function. */
#define _EVENT_HAVE_STRTOLL 1
/* Define to 1 if the system has the type `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
/* Define to 1 if you have the <sys/devpoll.h> header file. */
/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
/* Define to 1 if you have the <sys/epoll.h> header file. */
#define _EVENT_HAVE_SYS_EPOLL_H 1
/* Define to 1 if you have the <sys/event.h> header file. */
/* #undef _EVENT_HAVE_SYS_EVENT_H */
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define _EVENT_HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define _EVENT_HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/queue.h> header file. */
#define _EVENT_HAVE_SYS_QUEUE_H 1
/* Define to 1 if you have the <sys/select.h> header file. */
#define _EVENT_HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define _EVENT_HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define _EVENT_HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define _EVENT_HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define _EVENT_HAVE_SYS_TYPES_H 1
/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
#define _EVENT_HAVE_TAILQFOREACH 1
/* Define if timeradd is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERADD 1
/* Define if timerclear is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERCLEAR 1
/* Define if timercmp is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERCMP 1
/* Define if timerisset is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERISSET 1
/* Define to 1 if the system has the type `uint16_t'. */
#define _EVENT_HAVE_UINT16_T 1
/* Define to 1 if the system has the type `uint32_t'. */
#define _EVENT_HAVE_UINT32_T 1
/* Define to 1 if the system has the type `uint64_t'. */
#define _EVENT_HAVE_UINT64_T 1
/* Define to 1 if the system has the type `uint8_t'. */
#define _EVENT_HAVE_UINT8_T 1
/* Define to 1 if you have the <unistd.h> header file. */
#define _EVENT_HAVE_UNISTD_H 1
/* Define to 1 if you have the `vasprintf' function. */
#define _EVENT_HAVE_VASPRINTF 1
/* Define if kqueue works correctly with pipes */
/* #undef _EVENT_HAVE_WORKING_KQUEUE */
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
#define _EVENT_LT_OBJDIR ".libs/"
/* Numeric representation of the version */
#define _EVENT_NUMERIC_VERSION 0x01040f00
/* Name of package */
#define _EVENT_PACKAGE "libevent"
/* Define to the address where bug reports for this package should be sent. */
#define _EVENT_PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define _EVENT_PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define _EVENT_PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define _EVENT_PACKAGE_TARNAME ""
/* Define to the home page for this package. */
#define _EVENT_PACKAGE_URL ""
/* Define to the version of this package. */
#define _EVENT_PACKAGE_VERSION ""
/* The size of `int', as computed by sizeof. */
#define _EVENT_SIZEOF_INT 4
/* The size of `long', as computed by sizeof. */
#define _EVENT_SIZEOF_LONG 8
/* The size of `long long', as computed by sizeof. */
#define _EVENT_SIZEOF_LONG_LONG 8
/* The size of `short', as computed by sizeof. */
#define _EVENT_SIZEOF_SHORT 2
/* Define to 1 if you have the ANSI C header files. */
#define _EVENT_STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define _EVENT_TIME_WITH_SYS_TIME 1
/* Version number of package */
#define _EVENT_VERSION "1.4.15"
/* Define to an appropriate substitute if the compiler doesn't have __func__ */
/* #undef _EVENT___func__ */
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef _EVENT_const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef _EVENT___cplusplus
/* #undef _EVENT_inline */
#endif
/* Define to `int' if <sys/types.h> does not define. */
/* #undef _EVENT_pid_t */
/* Define to `unsigned int' if <sys/types.h> does not define. */
/* #undef _EVENT_size_t */
/* Define to unsigned int if you don't have it */
/* #undef _EVENT_socklen_t */
#endif

View file

@ -0,0 +1,101 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "config.h"
#include "min_heap.h"
#include "evsignal.h"
struct eventop {
const char *name;
void *(*init)(struct event_base *);
int (*add)(void *, struct event *);
int (*del)(void *, struct event *);
int (*dispatch)(struct event_base *, void *, struct timeval *);
void (*dealloc)(struct event_base *, void *);
/* set if we need to reinitialize the event base */
int need_reinit;
};
struct event_base {
const struct eventop *evsel;
void *evbase;
int event_count; /* counts number of total events */
int event_count_active; /* counts number of active events */
int event_gotterm; /* Set to terminate loop */
int event_break; /* Set to terminate loop immediately */
/* active event management */
struct event_list **activequeues;
int nactivequeues;
/* signal handling info */
struct evsignal_info sig;
struct event_list eventqueue;
struct timeval event_tv;
struct min_heap timeheap;
struct timeval tv_cache;
};
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#ifndef HAVE_TAILQFOREACH
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_FOREACH(var, head, field) \
for((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif /* TAILQ_FOREACH */
int _evsignal_set_handler(struct event_base *base, int evsignal,
void (*fn)(int));
int _evsignal_restore_handler(struct event_base *base, int evsignal);
/* defined in evutil.c */
const char *evutil_getenv(const char *varname);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT_INTERNAL_H_ */

View file

@ -0,0 +1,624 @@
.\" $OpenBSD: event.3,v 1.4 2002/07/12 18:50:48 provos Exp $
.\"
.\" Copyright (c) 2000 Artur Grabowski <art@openbsd.org>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\"
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\" 3. The name of the author may not be used to endorse or promote products
.\" derived from this software without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd August 8, 2000
.Dt EVENT 3
.Os
.Sh NAME
.Nm event_init ,
.Nm event_dispatch ,
.Nm event_loop ,
.Nm event_loopexit ,
.Nm event_loopbreak ,
.Nm event_set ,
.Nm event_base_dispatch ,
.Nm event_base_loop ,
.Nm event_base_loopexit ,
.Nm event_base_loopbreak ,
.Nm event_base_set ,
.Nm event_base_free ,
.Nm event_add ,
.Nm event_del ,
.Nm event_once ,
.Nm event_base_once ,
.Nm event_pending ,
.Nm event_initialized ,
.Nm event_priority_init ,
.Nm event_priority_set ,
.Nm evtimer_set ,
.Nm evtimer_add ,
.Nm evtimer_del ,
.Nm evtimer_pending ,
.Nm evtimer_initialized ,
.Nm signal_set ,
.Nm signal_add ,
.Nm signal_del ,
.Nm signal_pending ,
.Nm signal_initialized ,
.Nm bufferevent_new ,
.Nm bufferevent_free ,
.Nm bufferevent_write ,
.Nm bufferevent_write_buffer ,
.Nm bufferevent_read ,
.Nm bufferevent_enable ,
.Nm bufferevent_disable ,
.Nm bufferevent_settimeout ,
.Nm bufferevent_base_set ,
.Nm evbuffer_new ,
.Nm evbuffer_free ,
.Nm evbuffer_add ,
.Nm evbuffer_add_buffer ,
.Nm evbuffer_add_printf ,
.Nm evbuffer_add_vprintf ,
.Nm evbuffer_drain ,
.Nm evbuffer_write ,
.Nm evbuffer_read ,
.Nm evbuffer_find ,
.Nm evbuffer_readline ,
.Nm evhttp_new ,
.Nm evhttp_bind_socket ,
.Nm evhttp_free
.Nd execute a function when a specific event occurs
.Sh SYNOPSIS
.Fd #include <sys/time.h>
.Fd #include <event.h>
.Ft "struct event_base *"
.Fn "event_init" "void"
.Ft int
.Fn "event_dispatch" "void"
.Ft int
.Fn "event_loop" "int flags"
.Ft int
.Fn "event_loopexit" "struct timeval *tv"
.Ft int
.Fn "event_loopbreak" "void"
.Ft void
.Fn "event_set" "struct event *ev" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg"
.Ft int
.Fn "event_base_dispatch" "struct event_base *base"
.Ft int
.Fn "event_base_loop" "struct event_base *base" "int flags"
.Ft int
.Fn "event_base_loopexit" "struct event_base *base" "struct timeval *tv"
.Ft int
.Fn "event_base_loopbreak" "struct event_base *base"
.Ft int
.Fn "event_base_set" "struct event_base *base" "struct event *"
.Ft void
.Fn "event_base_free" "struct event_base *base"
.Ft int
.Fn "event_add" "struct event *ev" "struct timeval *tv"
.Ft int
.Fn "event_del" "struct event *ev"
.Ft int
.Fn "event_once" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
.Ft int
.Fn "event_base_once" "struct event_base *base" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
.Ft int
.Fn "event_pending" "struct event *ev" "short event" "struct timeval *tv"
.Ft int
.Fn "event_initialized" "struct event *ev"
.Ft int
.Fn "event_priority_init" "int npriorities"
.Ft int
.Fn "event_priority_set" "struct event *ev" "int priority"
.Ft void
.Fn "evtimer_set" "struct event *ev" "void (*fn)(int, short, void *)" "void *arg"
.Ft void
.Fn "evtimer_add" "struct event *ev" "struct timeval *"
.Ft void
.Fn "evtimer_del" "struct event *ev"
.Ft int
.Fn "evtimer_pending" "struct event *ev" "struct timeval *tv"
.Ft int
.Fn "evtimer_initialized" "struct event *ev"
.Ft void
.Fn "signal_set" "struct event *ev" "int signal" "void (*fn)(int, short, void *)" "void *arg"
.Ft void
.Fn "signal_add" "struct event *ev" "struct timeval *"
.Ft void
.Fn "signal_del" "struct event *ev"
.Ft int
.Fn "signal_pending" "struct event *ev" "struct timeval *tv"
.Ft int
.Fn "signal_initialized" "struct event *ev"
.Ft "struct bufferevent *"
.Fn "bufferevent_new" "int fd" "evbuffercb readcb" "evbuffercb writecb" "everrorcb" "void *cbarg"
.Ft void
.Fn "bufferevent_free" "struct bufferevent *bufev"
.Ft int
.Fn "bufferevent_write" "struct bufferevent *bufev" "void *data" "size_t size"
.Ft int
.Fn "bufferevent_write_buffer" "struct bufferevent *bufev" "struct evbuffer *buf"
.Ft size_t
.Fn "bufferevent_read" "struct bufferevent *bufev" "void *data" "size_t size"
.Ft int
.Fn "bufferevent_enable" "struct bufferevent *bufev" "short event"
.Ft int
.Fn "bufferevent_disable" "struct bufferevent *bufev" "short event"
.Ft void
.Fn "bufferevent_settimeout" "struct bufferevent *bufev" "int timeout_read" "int timeout_write"
.Ft int
.Fn "bufferevent_base_set" "struct event_base *base" "struct bufferevent *bufev"
.Ft "struct evbuffer *"
.Fn "evbuffer_new" "void"
.Ft void
.Fn "evbuffer_free" "struct evbuffer *buf"
.Ft int
.Fn "evbuffer_add" "struct evbuffer *buf" "const void *data" "size_t size"
.Ft int
.Fn "evbuffer_add_buffer" "struct evbuffer *dst" "struct evbuffer *src"
.Ft int
.Fn "evbuffer_add_printf" "struct evbuffer *buf" "const char *fmt" "..."
.Ft int
.Fn "evbuffer_add_vprintf" "struct evbuffer *buf" "const char *fmt" "va_list ap"
.Ft void
.Fn "evbuffer_drain" "struct evbuffer *buf" "size_t size"
.Ft int
.Fn "evbuffer_write" "struct evbuffer *buf" "int fd"
.Ft int
.Fn "evbuffer_read" "struct evbuffer *buf" "int fd" "int size"
.Ft "u_char *"
.Fn "evbuffer_find" "struct evbuffer *buf" "const u_char *data" "size_t size"
.Ft "char *"
.Fn "evbuffer_readline" "struct evbuffer *buf"
.Ft "struct evhttp *"
.Fn "evhttp_new" "struct event_base *base"
.Ft int
.Fn "evhttp_bind_socket" "struct evhttp *http" "const char *address" "u_short port"
.Ft "void"
.Fn "evhttp_free" "struct evhttp *http"
.Ft int
.Fa (*event_sigcb)(void) ;
.Ft volatile sig_atomic_t
.Fa event_gotsig ;
.Sh DESCRIPTION
The
.Nm event
API provides a mechanism to execute a function when a specific event
on a file descriptor occurs or after a given time has passed.
.Pp
The
.Nm event
API needs to be initialized with
.Fn event_init
before it can be used.
.Pp
In order to process events, an application needs to call
.Fn event_dispatch .
This function only returns on error, and should replace the event core
of the application program.
.Pp
The function
.Fn event_set
prepares the event structure
.Fa ev
to be used in future calls to
.Fn event_add
and
.Fn event_del .
The event will be prepared to call the function specified by the
.Fa fn
argument with an
.Fa int
argument indicating the file descriptor, a
.Fa short
argument indicating the type of event, and a
.Fa void *
argument given in the
.Fa arg
argument.
The
.Fa fd
indicates the file descriptor that should be monitored for events.
The events can be either
.Va EV_READ ,
.Va EV_WRITE ,
or both,
indicating that an application can read or write from the file descriptor
respectively without blocking.
.Pp
The function
.Fa fn
will be called with the file descriptor that triggered the event and
the type of event which will be either
.Va EV_TIMEOUT ,
.Va EV_SIGNAL ,
.Va EV_READ ,
or
.Va EV_WRITE .
Additionally, an event which has registered interest in more than one of the
preceding events, via bitwise-OR to
.Fn event_set ,
can provide its callback function with a bitwise-OR of more than one triggered
event.
The additional flag
.Va EV_PERSIST
makes an
.Fn event_add
persistent until
.Fn event_del
has been called.
.Pp
Once initialized, the
.Fa ev
structure can be used repeatedly with
.Fn event_add
and
.Fn event_del
and does not need to be reinitialized unless the function called and/or
the argument to it are to be changed.
However, when an
.Fa ev
structure has been added to libevent using
.Fn event_add
the structure must persist until the event occurs (assuming
.Fa EV_PERSIST
is not set) or is removed
using
.Fn event_del .
You may not reuse the same
.Fa ev
structure for multiple monitored descriptors; each descriptor
needs its own
.Fa ev .
.Pp
The function
.Fn event_add
schedules the execution of the
.Fa ev
event when the event specified in
.Fn event_set
occurs or in at least the time specified in the
.Fa tv .
If
.Fa tv
is
.Dv NULL ,
no timeout occurs and the function will only be called
if a matching event occurs on the file descriptor.
The event in the
.Fa ev
argument must be already initialized by
.Fn event_set
and may not be used in calls to
.Fn event_set
until it has timed out or been removed with
.Fn event_del .
If the event in the
.Fa ev
argument already has a scheduled timeout, the old timeout will be
replaced by the new one.
.Pp
The function
.Fn event_del
will cancel the event in the argument
.Fa ev .
If the event has already executed or has never been added
the call will have no effect.
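.Pp
For example, a minimal sketch (error handling omitted, and
.Fa fd
assumed to be an already open descriptor) that watches a descriptor for
readability looks like this:
.Bd -literal -offset indent
void
read_cb(int fd, short event, void *arg)
{
        /* fd is readable here; event contains EV_READ */
}

...
struct event ev;

event_init();
event_set(&ev, fd, EV_READ | EV_PERSIST, read_cb, NULL);
event_add(&ev, NULL);
event_dispatch();
.Ed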
.Pp
The functions
.Fn evtimer_set ,
.Fn evtimer_add ,
.Fn evtimer_del ,
.Fn evtimer_initialized ,
and
.Fn evtimer_pending
are abbreviations for common situations where only a timeout is required.
The file descriptor passed will be \-1, and the event type will be
.Va EV_TIMEOUT .
.Pp
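For example, a one-shot timer that fires after five seconds could be set up
as follows (a sketch;
.Fa timeout_cb
is a callback of the usual form):
.Bd -literal -offset indent
struct event tev;
struct timeval tv = { 5, 0 };

evtimer_set(&tev, timeout_cb, NULL);
evtimer_add(&tev, &tv);
.Ed
.Pp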
The functions
.Fn signal_set ,
.Fn signal_add ,
.Fn signal_del ,
.Fn signal_initialized ,
and
.Fn signal_pending
are abbreviations.
The event type will be a persistent
.Va EV_SIGNAL .
That means
.Fn signal_set
adds
.Va EV_PERSIST .
.Pp
In order to avoid races in signal handlers, the
.Nm event
API provides two variables:
.Va event_sigcb
and
.Va event_gotsig .
A signal handler
sets
.Va event_gotsig
to indicate that a signal has been received.
The application sets
.Va event_sigcb
to a callback function.
After the signal handler sets
.Va event_gotsig ,
.Nm event_dispatch
will execute the callback function to process received signals.
The callback returns 1 when no events are registered any more.
It can return \-1 to indicate an error to the
.Nm event
library, causing
.Fn event_dispatch
to terminate with
.Va errno
set to
.Er EINTR .
.Pp
The function
.Fn event_once
is similar to
.Fn event_set .
However, it schedules a callback to be called exactly once and does not
require the caller to prepare an
.Fa event
structure.
This function supports
.Fa EV_TIMEOUT ,
.Fa EV_READ ,
and
.Fa EV_WRITE .
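.Pp
For instance, to run a callback once, roughly two seconds from now, without
preparing an event structure (a sketch;
.Fa once_cb
is a callback of the usual form):
.Bd -literal -offset indent
struct timeval tv = { 2, 0 };

event_once(-1, EV_TIMEOUT, once_cb, NULL, &tv);
.Ed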
.Pp
The
.Fn event_pending
function can be used to check if the event specified by
.Fa event
is pending to run.
If
.Va EV_TIMEOUT
was specified and
.Fa tv
is not
.Dv NULL ,
the expiration time of the event will be returned in
.Fa tv .
.Pp
The
.Fn event_initialized
macro can be used to check if an event has been initialized.
.Pp
The
.Nm event_loop
function provides an interface for single pass execution of pending
events.
The flags
.Va EVLOOP_ONCE
and
.Va EVLOOP_NONBLOCK
are recognized.
The
.Nm event_loopexit
function exits from the event loop. The next
.Fn event_loop
iteration after the
given timer expires will complete normally (handling all queued events) then
exit without blocking for events again. Subsequent invocations of
.Fn event_loop
will proceed normally.
The
.Nm event_loopbreak
function exits from the event loop immediately.
.Fn event_loop
will abort after the next event is completed;
.Fn event_loopbreak
is typically invoked from this event's callback. This behavior is analogous
to the "break;" statement. Subsequent invocations of
.Fn event_loop
will proceed normally.
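.Pp
For example, to leave a running event loop roughly one second after the call
(a sketch):
.Bd -literal -offset indent
struct timeval tv = { 1, 0 };

event_loopexit(&tv);
.Ed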
.Pp
It is the responsibility of the caller to provide these functions with
pre-allocated event structures.
.Pp
.Sh EVENT PRIORITIES
By default
.Nm libevent
schedules all active events with the same priority.
However, sometimes it is desirable to process some events with a higher
priority than others.
For that reason,
.Nm libevent
supports strict priority queues.
Active events with a lower priority are always processed before events
with a higher priority.
.Pp
The number of different priorities can be set initially with the
.Fn event_priority_init
function.
This function should be called before the first call to
.Fn event_dispatch .
The
.Fn event_priority_set
function can be used to assign a priority to an event.
By default,
.Nm libevent
assigns the middle priority to all events unless their priority
is explicitly set.
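.Pp
A sketch with two priority levels, in which
.Fa urgent_ev
is always serviced before
.Fa bulk_ev
(all identifiers are placeholders):
.Bd -literal -offset indent
event_init();
event_priority_init(2);

event_set(&urgent_ev, fd1, EV_READ, urgent_cb, NULL);
event_priority_set(&urgent_ev, 0);

event_set(&bulk_ev, fd2, EV_READ, bulk_cb, NULL);
event_priority_set(&bulk_ev, 1);

event_dispatch();
.Ed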
.Sh THREAD SAFE EVENTS
.Nm Libevent
has experimental support for thread-safe events.
When initializing the library via
.Fn event_init ,
an event base is returned.
This event base can be used in conjunction with calls to
.Fn event_base_set ,
.Fn event_base_dispatch ,
.Fn event_base_loop ,
.Fn event_base_loopexit ,
.Fn bufferevent_base_set
and
.Fn event_base_free .
.Fn event_base_set
should be called after preparing an event with
.Fn event_set ,
as
.Fn event_set
assigns the provided event to the most recently created event base.
.Fn bufferevent_base_set
should be called after preparing a bufferevent with
.Fn bufferevent_new .
.Fn event_base_free
should be used to free memory associated with the event base
when it is no longer needed.
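.Pp
A sketch of driving an explicitly created event base, where
.Fa fd
and
.Fa read_cb
are placeholders:
.Bd -literal -offset indent
struct event_base *base = event_init();
struct event ev;

event_set(&ev, fd, EV_READ | EV_PERSIST, read_cb, NULL);
event_base_set(base, &ev);
event_add(&ev, NULL);

event_base_dispatch(base);
event_base_free(base);
.Ed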
.Sh BUFFERED EVENTS
.Nm libevent
provides an abstraction on top of the regular event callbacks.
This abstraction is called a
.Va "buffered event" .
A buffered event provides input and output buffers that get filled
and drained automatically.
The user of a buffered event no longer deals directly with the IO,
but instead is reading from input and writing to output buffers.
.Pp
A new bufferevent is created by
.Fn bufferevent_new .
The parameter
.Fa fd
specifies the file descriptor from which data is read and to which it is written.
This file descriptor is not allowed to be a
.Xr pipe 2 .
The next three parameters are callbacks.
The read and write callback have the following form:
.Ft void
.Fn "(*cb)" "struct bufferevent *bufev" "void *arg" .
The error callback has the following form:
.Ft void
.Fn "(*cb)" "struct bufferevent *bufev" "short what" "void *arg" .
The argument is specified by the fourth parameter
.Fa "cbarg" .
A
.Fa bufferevent struct
pointer is returned on success, NULL on error.
Both the read and the write callback may be NULL.
The error callback must always be provided.
.Pp
Once initialized, the bufferevent structure can be used repeatedly with
bufferevent_enable() and bufferevent_disable().
The flags parameter can be a combination of
.Va EV_READ
and
.Va EV_WRITE .
When read-enabled, the bufferevent will try to read from the file
descriptor and call the read callback.
The write callback is executed
whenever the output buffer is drained below the write low watermark,
which is
.Va 0
by default.
.Pp
The
.Fn bufferevent_write
function can be used to write data to the file descriptor.
The data is appended to the output buffer and written to the descriptor
automatically as it becomes available for writing.
.Fn bufferevent_write
returns 0 on success or \-1 on failure.
The
.Fn bufferevent_read
function is used to read data from the input buffer,
returning the amount of data read.
.Pp
If multiple bases are in use, bufferevent_base_set() must be called before
enabling the bufferevent for the first time.
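.Pp
A sketch of a buffered event that echoes whatever it reads, where
.Fa fd
is assumed to be a connected, non-blocking socket:
.Bd -literal -offset indent
void
echo_read_cb(struct bufferevent *bufev, void *arg)
{
        char buf[512];
        size_t n;

        while ((n = bufferevent_read(bufev, buf, sizeof(buf))) > 0)
                bufferevent_write(bufev, buf, n);
}

void
echo_error_cb(struct bufferevent *bufev, short what, void *arg)
{
        bufferevent_free(bufev);
}

...
struct bufferevent *bev;

bev = bufferevent_new(fd, echo_read_cb, NULL, echo_error_cb, NULL);
bufferevent_enable(bev, EV_READ);
.Ed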
.Sh NON-BLOCKING HTTP SUPPORT
.Nm libevent
provides a very thin HTTP layer that can be used both to host an HTTP
server and also to make HTTP requests.
An HTTP server can be created by calling
.Fn evhttp_new .
It can be bound to any port and address with the
.Fn evhttp_bind_socket
function.
When the HTTP server is no longer used, it can be freed via
.Fn evhttp_free .
.Pp
To be notified of HTTP requests, a user needs to register callbacks with the
HTTP server.
This can be done by calling
.Fn evhttp_set_cb .
The second argument is the URI for which a callback is being registered.
The corresponding callback will receive an
.Va struct evhttp_request
object that contains all information about the request.
.Pp
This section does not document all the possible function calls; please
check
.Va event.h
for the public interfaces.
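.Pp
A sketch of a tiny HTTP server; the callback and reply functions follow the
evhttp interface in
.Va event.h ,
which should be consulted for the exact prototypes, and the address and port
are placeholders:
.Bd -literal -offset indent
void
hello_cb(struct evhttp_request *req, void *arg)
{
        struct evbuffer *buf = evbuffer_new();

        evbuffer_add_printf(buf, "hello world");
        evhttp_send_reply(req, 200, "OK", buf);
        evbuffer_free(buf);
}

...
struct event_base *base = event_init();
struct evhttp *httpd = evhttp_new(base);

evhttp_bind_socket(httpd, "0.0.0.0", 8080);
evhttp_set_cb(httpd, "/hello", hello_cb, NULL);
event_base_dispatch(base);
evhttp_free(httpd);
.Ed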
.Sh ADDITIONAL NOTES
It is possible to disable support for
.Va epoll , kqueue , devpoll , poll
or
.Va select
by setting the environment variable
.Va EVENT_NOEPOLL , EVENT_NOKQUEUE , EVENT_NODEVPOLL , EVENT_NOPOLL
or
.Va EVENT_NOSELECT ,
respectively.
By setting the environment variable
.Va EVENT_SHOW_METHOD ,
.Nm libevent
displays the kernel notification method that it uses.
.Sh RETURN VALUES
Upon successful completion
.Fn event_add
and
.Fn event_del
return 0.
Otherwise, \-1 is returned and the global variable errno is
set to indicate the error.
.Sh SEE ALSO
.Xr kqueue 2 ,
.Xr poll 2 ,
.Xr select 2 ,
.Xr evdns 3 ,
.Xr timeout 9
.Sh HISTORY
The
.Nm event
API manpage is based on the
.Xr timeout 9
manpage by Artur Grabowski.
The port of
.Nm libevent
to Windows is due to Michael A. Davis.
Support for real-time signals is due to Taral.
.Sh AUTHORS
The
.Nm event
library was written by Niels Provos.
.Sh BUGS
This documentation is neither complete nor authoritative.
If you are in doubt about the usage of this API then
check the source code to find out how it works, write
up the missing piece of documentation and send it to
me for inclusion in this man page.

View file

@ -0,0 +1,998 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif
/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
&kqops,
#endif
#ifdef HAVE_EPOLL
&epollops,
#endif
#ifdef HAVE_DEVPOLL
&devpollops,
#endif
#ifdef HAVE_POLL
&pollops,
#endif
#ifdef HAVE_SELECT
&selectops,
#endif
#ifdef WIN32
&win32ops,
#endif
NULL
};
/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic = 1;
/* Prototypes */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int event_haveevents(struct event_base *);
static void event_process_active(struct event_base *);
static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
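/* Return the cached time of the current loop iteration when it is valid
 * (tv_cache is set while callbacks run); otherwise prefer the monotonic
 * clock and fall back to gettimeofday(). */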
static int
gettime(struct event_base *base, struct timeval *tp)
{
if (base->tv_cache.tv_sec) {
*tp = base->tv_cache;
return (0);
}
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
struct timespec ts;
if (use_monotonic &&
clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
tp->tv_sec = ts.tv_sec;
tp->tv_usec = ts.tv_nsec / 1000;
return (0);
}
#endif
use_monotonic = 0;
return (evutil_gettimeofday(tp, NULL));
}
struct event_base *
event_init(void)
{
struct event_base *base = event_base_new();
if (base != NULL)
current_base = base;
return (base);
}
struct event_base *
event_base_new(void)
{
int i;
struct event_base *base;
if ((base = calloc(1, sizeof(struct event_base))) == NULL)
event_err(1, "%s: calloc", __func__);
gettime(base, &base->event_tv);
min_heap_ctor(&base->timeheap);
TAILQ_INIT(&base->eventqueue);
base->sig.ev_signal_pair[0] = -1;
base->sig.ev_signal_pair[1] = -1;
base->evbase = NULL;
for (i = 0; eventops[i] && !base->evbase; i++) {
base->evsel = eventops[i];
base->evbase = base->evsel->init(base);
}
if (base->evbase == NULL)
event_errx(1, "%s: no event mechanism available", __func__);
if (evutil_getenv("EVENT_SHOW_METHOD"))
event_msgx("libevent using: %s\n",
base->evsel->name);
/* allocate a single active event queue */
event_base_priority_init(base, 1);
return (base);
}
void
event_base_free(struct event_base *base)
{
int i, n_deleted=0;
struct event *ev;
if (base == NULL && current_base)
base = current_base;
if (base == current_base)
current_base = NULL;
/* XXX(niels) - check for internal events first */
assert(base);
/* Delete all non-internal events. */
for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
struct event *next = TAILQ_NEXT(ev, ev_next);
if (!(ev->ev_flags & EVLIST_INTERNAL)) {
event_del(ev);
++n_deleted;
}
ev = next;
}
while ((ev = min_heap_top(&base->timeheap)) != NULL) {
event_del(ev);
++n_deleted;
}
for (i = 0; i < base->nactivequeues; ++i) {
for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
struct event *next = TAILQ_NEXT(ev, ev_active_next);
if (!(ev->ev_flags & EVLIST_INTERNAL)) {
event_del(ev);
++n_deleted;
}
ev = next;
}
}
if (n_deleted)
event_debug(("%s: %d events were still set in base",
__func__, n_deleted));
if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base, base->evbase);
for (i = 0; i < base->nactivequeues; ++i)
assert(TAILQ_EMPTY(base->activequeues[i]));
assert(min_heap_empty(&base->timeheap));
min_heap_dtor(&base->timeheap);
for (i = 0; i < base->nactivequeues; ++i)
free(base->activequeues[i]);
free(base->activequeues);
assert(TAILQ_EMPTY(&base->eventqueue));
free(base);
}
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
const struct eventop *evsel = base->evsel;
void *evbase = base->evbase;
int res = 0;
struct event *ev;
#if 0
/* Right now, reinit always takes effect, since even if the
backend doesn't require it, the signal socketpair code does.
*/
/* check if this event mechanism requires reinit */
if (!evsel->need_reinit)
return (0);
#endif
/* prevent internal delete */
if (base->sig.ev_signal_added) {
/* we cannot call event_del here because the base has
* not been reinitialized yet. */
event_queue_remove(base, &base->sig.ev_signal,
EVLIST_INSERTED);
if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
event_queue_remove(base, &base->sig.ev_signal,
EVLIST_ACTIVE);
base->sig.ev_signal_added = 0;
}
if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base, base->evbase);
evbase = base->evbase = evsel->init(base);
if (base->evbase == NULL)
event_errx(1, "%s: could not reinitialize event mechanism",
__func__);
TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
if (evsel->add(evbase, ev) == -1)
res = -1;
}
return (res);
}
int
event_priority_init(int npriorities)
{
return event_base_priority_init(current_base, npriorities);
}
int
event_base_priority_init(struct event_base *base, int npriorities)
{
int i;
if (base->event_count_active)
return (-1);
if (npriorities == base->nactivequeues)
return (0);
if (base->nactivequeues) {
for (i = 0; i < base->nactivequeues; ++i) {
free(base->activequeues[i]);
}
free(base->activequeues);
}
/* Allocate our priority queues */
base->nactivequeues = npriorities;
base->activequeues = (struct event_list **)
calloc(base->nactivequeues, sizeof(struct event_list *));
if (base->activequeues == NULL)
event_err(1, "%s: calloc", __func__);
for (i = 0; i < base->nactivequeues; ++i) {
base->activequeues[i] = malloc(sizeof(struct event_list));
if (base->activequeues[i] == NULL)
event_err(1, "%s: malloc", __func__);
TAILQ_INIT(base->activequeues[i]);
}
return (0);
}
int
event_haveevents(struct event_base *base)
{
return (base->event_count > 0);
}
/*
* Active events are stored in priority queues. Lower priorities are always
 * processed before higher priorities. Low priority events can starve high
* priority ones.
*/
static void
event_process_active(struct event_base *base)
{
struct event *ev;
struct event_list *activeq = NULL;
int i;
short ncalls;
for (i = 0; i < base->nactivequeues; ++i) {
if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
activeq = base->activequeues[i];
break;
}
}
assert(activeq != NULL);
for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
if (ev->ev_events & EV_PERSIST)
event_queue_remove(base, ev, EVLIST_ACTIVE);
else
event_del(ev);
/* Point ev_pncalls at the local count so event_del() can zero it
 * if the callback deletes this event */
ncalls = ev->ev_ncalls;
ev->ev_pncalls = &ncalls;
while (ncalls) {
ncalls--;
ev->ev_ncalls = ncalls;
(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
if (base->event_break)
return;
}
}
}
/*
 * Wait continuously for events. We exit only if no events are left.
*/
int
event_dispatch(void)
{
return (event_loop(0));
}
int
event_base_dispatch(struct event_base *event_base)
{
return (event_base_loop(event_base, 0));
}
const char *
event_base_get_method(struct event_base *base)
{
assert(base);
return (base->evsel->name);
}
static void
event_loopexit_cb(int fd, short what, void *arg)
{
struct event_base *base = arg;
base->event_gotterm = 1;
}
/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
current_base, tv));
}
int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
event_base, tv));
}
/* not thread safe */
int
event_loopbreak(void)
{
return (event_base_loopbreak(current_base));
}
int
event_base_loopbreak(struct event_base *event_base)
{
if (event_base == NULL)
return (-1);
event_base->event_break = 1;
return (0);
}
/* not thread safe */
int
event_loop(int flags)
{
return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{
const struct eventop *evsel = base->evsel;
void *evbase = base->evbase;
struct timeval tv;
struct timeval *tv_p;
int res, done;
/* clear time cache */
base->tv_cache.tv_sec = 0;
if (base->sig.ev_signal_added)
evsignal_base = base;
done = 0;
while (!done) {
/* Terminate the loop if we have been asked to */
if (base->event_gotterm) {
base->event_gotterm = 0;
break;
}
if (base->event_break) {
base->event_break = 0;
break;
}
timeout_correct(base, &tv);
tv_p = &tv;
if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
timeout_next(base, &tv_p);
} else {
/*
* if we have active events, we just poll new events
* without waiting.
*/
evutil_timerclear(&tv);
}
/* If we have no events, we just exit */
if (!event_haveevents(base)) {
event_debug(("%s: no events registered.", __func__));
return (1);
}
/* update last old time */
gettime(base, &base->event_tv);
/* clear time cache */
base->tv_cache.tv_sec = 0;
res = evsel->dispatch(base, evbase, tv_p);
if (res == -1)
return (-1);
gettime(base, &base->tv_cache);
timeout_process(base);
if (base->event_count_active) {
event_process_active(base);
if (!base->event_count_active && (flags & EVLOOP_ONCE))
done = 1;
} else if (flags & EVLOOP_NONBLOCK)
done = 1;
}
/* clear time cache */
base->tv_cache.tv_sec = 0;
event_debug(("%s: asked to terminate loop.", __func__));
return (0);
}
/* Sets up an event for processing once */
struct event_once {
struct event ev;
void (*cb)(int, short, void *);
void *arg;
};
/* One-time callback, it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
struct event_once *eonce = arg;
(*eonce->cb)(fd, events, eonce->arg);
free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
struct event_once *eonce;
struct timeval etv;
int res;
/* We cannot support signals that just fire once */
if (events & EV_SIGNAL)
return (-1);
if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
return (-1);
eonce->cb = callback;
eonce->arg = arg;
if (events == EV_TIMEOUT) {
if (tv == NULL) {
evutil_timerclear(&etv);
tv = &etv;
}
evtimer_set(&eonce->ev, event_once_cb, eonce);
} else if (events & (EV_READ|EV_WRITE)) {
events &= EV_READ|EV_WRITE;
event_set(&eonce->ev, fd, events, event_once_cb, eonce);
} else {
/* Bad event combination */
free(eonce);
return (-1);
}
res = event_base_set(base, &eonce->ev);
if (res == 0)
res = event_add(&eonce->ev, tv);
if (res != 0) {
free(eonce);
return (res);
}
return (0);
}
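/*
 * Minimal usage sketch (illustrative only): schedule a one-shot timeout
 * with event_base_once().  The internal event_once wrapper frees itself
 * after the callback has run.  Names prefixed with "example_" are
 * hypothetical.
 */
#if 0
static void
example_once_cb(int fd, short events, void *arg)
{
	/* events is EV_TIMEOUT here; arg is whatever was passed below */
}

static int
example_schedule_once(struct event_base *base)
{
	struct timeval tv;

	evutil_timerclear(&tv);
	tv.tv_sec = 2;	/* fire once, two seconds from now */
	return (event_base_once(base, -1, EV_TIMEOUT,
	    example_once_cb, NULL, &tv));
}
#endif /* usage sketch */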
void
event_set(struct event *ev, int fd, short events,
void (*callback)(int, short, void *), void *arg)
{
/* Take the current base - caller needs to set the real base later */
ev->ev_base = current_base;
ev->ev_callback = callback;
ev->ev_arg = arg;
ev->ev_fd = fd;
ev->ev_events = events;
ev->ev_res = 0;
ev->ev_flags = EVLIST_INIT;
ev->ev_ncalls = 0;
ev->ev_pncalls = NULL;
min_heap_elem_init(ev);
/* by default, we put new events into the middle priority */
if(current_base)
ev->ev_pri = current_base->nactivequeues/2;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
/* Only innocent events may be assigned to a different base */
if (ev->ev_flags != EVLIST_INIT)
return (-1);
ev->ev_base = base;
ev->ev_pri = base->nactivequeues/2;
return (0);
}
/*
 * Sets the priority of an event - if the event is already active,
 * changing the priority is going to fail.
*/
int
event_priority_set(struct event *ev, int pri)
{
if (ev->ev_flags & EVLIST_ACTIVE)
return (-1);
if (pri < 0 || pri >= ev->ev_base->nactivequeues)
return (-1);
ev->ev_pri = pri;
return (0);
}
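/*
 * Minimal usage sketch (illustrative only): assign an event to a base and
 * raise its priority before adding it.  event_priority_set() must be called
 * while the event is not active; priority 0 is the most important queue and
 * only matters when the base has more than one priority queue.  Names
 * prefixed with "example_" are hypothetical.
 */
#if 0
static void
example_add_urgent(struct event_base *base, struct event *ev, int fd,
    void (*cb)(int, short, void *), void *arg)
{
	event_set(ev, fd, EV_READ|EV_PERSIST, cb, arg);
	event_base_set(base, ev);	/* must happen before event_add() */
	event_priority_set(ev, 0);	/* highest priority queue */
	event_add(ev, NULL);		/* no timeout */
}
#endif /* usage sketch */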
/*
* Checks if a specific event is pending or scheduled.
*/
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
struct timeval now, res;
int flags = 0;
if (ev->ev_flags & EVLIST_INSERTED)
flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
if (ev->ev_flags & EVLIST_ACTIVE)
flags |= ev->ev_res;
if (ev->ev_flags & EVLIST_TIMEOUT)
flags |= EV_TIMEOUT;
event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
/* See if there is a timeout that we should report */
if (tv != NULL && (flags & event & EV_TIMEOUT)) {
gettime(ev->ev_base, &now);
evutil_timersub(&ev->ev_timeout, &now, &res);
/* correctly remap to real time */
evutil_gettimeofday(&now, NULL);
evutil_timeradd(&now, &res, tv);
}
return (flags & event);
}
int
event_add(struct event *ev, const struct timeval *tv)
{
struct event_base *base = ev->ev_base;
const struct eventop *evsel = base->evsel;
void *evbase = base->evbase;
int res = 0;
event_debug((
"event_add: event: %p, %s%s%scall %p",
ev,
ev->ev_events & EV_READ ? "EV_READ " : " ",
ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
tv ? "EV_TIMEOUT " : " ",
ev->ev_callback));
assert(!(ev->ev_flags & ~EVLIST_ALL));
/*
* prepare for timeout insertion further below, if we get a
* failure on any step, we should not change any state.
*/
if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
if (min_heap_reserve(&base->timeheap,
1 + min_heap_size(&base->timeheap)) == -1)
return (-1); /* ENOMEM == errno */
}
if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
!(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
res = evsel->add(evbase, ev);
if (res != -1)
event_queue_insert(base, ev, EVLIST_INSERTED);
}
/*
	 * we should change the timeout state only if the previous event
* addition succeeded.
*/
if (res != -1 && tv != NULL) {
struct timeval now;
/*
* we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
*/
if (ev->ev_flags & EVLIST_TIMEOUT)
event_queue_remove(base, ev, EVLIST_TIMEOUT);
/* Check if it is active due to a timeout. Rescheduling
* this timeout before the callback can be executed
* removes it from the active list. */
if ((ev->ev_flags & EVLIST_ACTIVE) &&
(ev->ev_res & EV_TIMEOUT)) {
			/* See if this event is currently being
			 * executed in a callback loop
			 */
if (ev->ev_ncalls && ev->ev_pncalls) {
/* Abort loop */
*ev->ev_pncalls = 0;
}
event_queue_remove(base, ev, EVLIST_ACTIVE);
}
gettime(base, &now);
evutil_timeradd(&now, tv, &ev->ev_timeout);
event_debug((
"event_add: timeout in %ld seconds, call %p",
tv->tv_sec, ev->ev_callback));
event_queue_insert(base, ev, EVLIST_TIMEOUT);
}
return (res);
}
int
event_del(struct event *ev)
{
struct event_base *base;
event_debug(("event_del: %p, callback %p",
ev, ev->ev_callback));
/* An event without a base has not been added */
if (ev->ev_base == NULL)
return (-1);
base = ev->ev_base;
assert(!(ev->ev_flags & ~EVLIST_ALL));
	/* See if this event is currently being executed in a callback loop */
if (ev->ev_ncalls && ev->ev_pncalls) {
/* Abort loop */
*ev->ev_pncalls = 0;
}
if (ev->ev_flags & EVLIST_TIMEOUT)
event_queue_remove(base, ev, EVLIST_TIMEOUT);
if (ev->ev_flags & EVLIST_ACTIVE)
event_queue_remove(base, ev, EVLIST_ACTIVE);
if (ev->ev_flags & EVLIST_INSERTED) {
event_queue_remove(base, ev, EVLIST_INSERTED);
return (base->evsel->del(base->evbase, ev));
}
return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
/* We get different kinds of events, add them together */
if (ev->ev_flags & EVLIST_ACTIVE) {
ev->ev_res |= res;
return;
}
ev->ev_res = res;
ev->ev_ncalls = ncalls;
ev->ev_pncalls = NULL;
event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
struct timeval now;
struct event *ev;
struct timeval *tv = *tv_p;
if ((ev = min_heap_top(&base->timeheap)) == NULL) {
/* if no time-based events are active wait for I/O */
*tv_p = NULL;
return (0);
}
if (gettime(base, &now) == -1)
return (-1);
if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
evutil_timerclear(tv);
return (0);
}
evutil_timersub(&ev->ev_timeout, &now, tv);
assert(tv->tv_sec >= 0);
assert(tv->tv_usec >= 0);
event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
return (0);
}
/*
* Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using a
 * monotonic clock.
*/
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
struct event **pev;
unsigned int size;
struct timeval off;
if (use_monotonic)
return;
/* Check if time is running backwards */
gettime(base, tv);
if (evutil_timercmp(tv, &base->event_tv, >=)) {
base->event_tv = *tv;
return;
}
event_debug(("%s: time is running backwards, corrected",
__func__));
evutil_timersub(&base->event_tv, tv, &off);
/*
* We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
*/
pev = base->timeheap.p;
size = base->timeheap.n;
for (; size-- > 0; ++pev) {
struct timeval *ev_tv = &(**pev).ev_timeout;
evutil_timersub(ev_tv, &off, ev_tv);
}
/* Now remember what the new time turned out to be. */
base->event_tv = *tv;
}
void
timeout_process(struct event_base *base)
{
struct timeval now;
struct event *ev;
if (min_heap_empty(&base->timeheap))
return;
gettime(base, &now);
while ((ev = min_heap_top(&base->timeheap))) {
if (evutil_timercmp(&ev->ev_timeout, &now, >))
break;
/* delete this event from the I/O queues */
event_del(ev);
event_debug(("timeout_process: call %p",
ev->ev_callback));
event_active(ev, EV_TIMEOUT, 1);
}
}
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
if (!(ev->ev_flags & queue))
event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
ev, ev->ev_fd, queue);
if (~ev->ev_flags & EVLIST_INTERNAL)
base->event_count--;
ev->ev_flags &= ~queue;
switch (queue) {
case EVLIST_INSERTED:
TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
break;
case EVLIST_ACTIVE:
base->event_count_active--;
TAILQ_REMOVE(base->activequeues[ev->ev_pri],
ev, ev_active_next);
break;
case EVLIST_TIMEOUT:
min_heap_erase(&base->timeheap, ev);
break;
default:
event_errx(1, "%s: unknown queue %x", __func__, queue);
}
}
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
if (ev->ev_flags & queue) {
/* Double insertion is possible for active events */
if (queue & EVLIST_ACTIVE)
return;
event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
ev, ev->ev_fd, queue);
}
if (~ev->ev_flags & EVLIST_INTERNAL)
base->event_count++;
ev->ev_flags |= queue;
switch (queue) {
case EVLIST_INSERTED:
TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
break;
case EVLIST_ACTIVE:
base->event_count_active++;
TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
ev,ev_active_next);
break;
case EVLIST_TIMEOUT: {
min_heap_push(&base->timeheap, ev);
break;
}
default:
event_errx(1, "%s: unknown queue %x", __func__, queue);
}
}
/* Functions for debugging */
const char *
event_get_version(void)
{
return (VERSION);
}
/*
* No thread-safe interface needed - the information should be the same
* for all threads.
*/
const char *
event_get_method(void)
{
return (current_base->evsel->name);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,441 @@
/*
* Copyright (c) 2003, 2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#else
#include <sys/ioctl.h>
#endif
#include <sys/queue.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef WIN32
#include <syslog.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "event.h"
#include "evutil.h"
#include "log.h"
int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
static struct evbuffer *_buf; /* not thread safe */
void
evtag_init(void)
{
if (_buf != NULL)
return;
if ((_buf = evbuffer_new()) == NULL)
event_err(1, "%s: malloc", __func__);
}
/*
 * We encode integers by nibbles; the first nibble contains the number
* of significant nibbles - 1; this allows us to encode up to 64-bit
* integers. This function is byte-order independent.
*/
void
encode_int(struct evbuffer *evbuf, ev_uint32_t number)
{
int off = 1, nibbles = 0;
ev_uint8_t data[5];
memset(data, 0, sizeof(ev_uint32_t)+1);
while (number) {
if (off & 0x1)
data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f);
else
data[off/2] = (data[off/2] & 0x0f) |
((number & 0x0f) << 4);
number >>= 4;
off++;
}
if (off > 2)
nibbles = off - 2;
/* Off - 1 is the number of encoded nibbles */
data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4);
evbuffer_add(evbuf, data, (off + 1) / 2);
}
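/*
 * Worked example (illustrative only): encode_int(evbuf, 0x1234) appends the
 * three bytes 0x34 0x32 0x10.  The high nibble of the first byte (3) is the
 * number of significant nibbles minus one, i.e. four value nibbles follow.
 * The value nibbles are stored least-significant first, alternating between
 * the low and high half of each byte: 4 (low half of byte 0), 3 (high half
 * of byte 1), 2 (low half of byte 1), 1 (high half of byte 2).
 * decode_int_internal() below reverses exactly this layout.
 */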
/*
* Support variable length encoding of tags; we use the high bit in each
* octet as a continuation signal.
*/
int
evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
{
int bytes = 0;
ev_uint8_t data[5];
memset(data, 0, sizeof(data));
do {
ev_uint8_t lower = tag & 0x7f;
tag >>= 7;
if (tag)
lower |= 0x80;
data[bytes++] = lower;
} while (tag);
if (evbuf != NULL)
evbuffer_add(evbuf, data, bytes);
return (bytes);
}
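/*
 * Worked example (illustrative only): evtag_encode_tag(evbuf, 300) emits the
 * two bytes 0xac 0x02.  The low seven bits of 300 (0x2c) go out first with
 * the continuation bit 0x80 set; the remaining bits (300 >> 7 == 2) follow
 * in a final octet with the high bit clear.  decode_tag_internal() below
 * stops at the first octet whose high bit is not set.
 */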
static int
decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
{
ev_uint32_t number = 0;
ev_uint8_t *data = EVBUFFER_DATA(evbuf);
int len = EVBUFFER_LENGTH(evbuf);
int count = 0, shift = 0, done = 0;
while (count++ < len) {
ev_uint8_t lower = *data++;
number |= (lower & 0x7f) << shift;
shift += 7;
if (!(lower & 0x80)) {
done = 1;
break;
}
}
if (!done)
return (-1);
if (dodrain)
evbuffer_drain(evbuf, count);
if (ptag != NULL)
*ptag = number;
return (count);
}
int
evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
{
return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
}
/*
 * Marshal a data type; the general format is as follows:
 *
 * tag number: var bytes; length: var bytes; payload: var bytes
*/
void
evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
const void *data, ev_uint32_t len)
{
evtag_encode_tag(evbuf, tag);
encode_int(evbuf, len);
evbuffer_add(evbuf, (void *)data, len);
}
/* Marshaling for integers */
void
evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
{
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
encode_int(_buf, integer);
evtag_encode_tag(evbuf, tag);
encode_int(evbuf, EVBUFFER_LENGTH(_buf));
evbuffer_add_buffer(evbuf, _buf);
}
void
evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
{
evtag_marshal(buf, tag, string, strlen(string));
}
void
evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
{
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
encode_int(_buf, tv->tv_sec);
encode_int(_buf, tv->tv_usec);
evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf),
EVBUFFER_LENGTH(_buf));
}
static int
decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int dodrain)
{
ev_uint32_t number = 0;
ev_uint8_t *data = EVBUFFER_DATA(evbuf);
int len = EVBUFFER_LENGTH(evbuf);
int nibbles = 0;
if (!len)
return (-1);
nibbles = ((data[0] & 0xf0) >> 4) + 1;
if (nibbles > 8 || (nibbles >> 1) + 1 > len)
return (-1);
len = (nibbles >> 1) + 1;
while (nibbles > 0) {
number <<= 4;
if (nibbles & 0x1)
number |= data[nibbles >> 1] & 0x0f;
else
number |= (data[nibbles >> 1] & 0xf0) >> 4;
nibbles--;
}
if (dodrain)
evbuffer_drain(evbuf, len);
*pnumber = number;
return (len);
}
int
evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
{
return (decode_int_internal(pnumber, evbuf, 1) == -1 ? -1 : 0);
}
int
evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
}
int
evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
struct evbuffer tmp;
int res, len;
len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
if (len == -1)
return (-1);
tmp = *evbuf;
tmp.buffer += len;
tmp.off -= len;
res = decode_int_internal(plength, &tmp, 0);
if (res == -1)
return (-1);
*plength += res + len;
return (0);
}
int
evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
struct evbuffer tmp;
int res, len;
len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
if (len == -1)
return (-1);
tmp = *evbuf;
tmp.buffer += len;
tmp.off -= len;
res = decode_int_internal(plength, &tmp, 0);
if (res == -1)
return (-1);
return (0);
}
int
evtag_consume(struct evbuffer *evbuf)
{
ev_uint32_t len;
if (decode_tag_internal(NULL, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (evtag_decode_int(&len, evbuf) == -1)
return (-1);
evbuffer_drain(evbuf, len);
return (0);
}
/* Reads the data type from an event buffer */
int
evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
{
ev_uint32_t len;
ev_uint32_t integer;
if (decode_tag_internal(ptag, src, 1 /* dodrain */) == -1)
return (-1);
if (evtag_decode_int(&integer, src) == -1)
return (-1);
len = integer;
if (EVBUFFER_LENGTH(src) < len)
return (-1);
if (evbuffer_add(dst, EVBUFFER_DATA(src), len) == -1)
return (-1);
evbuffer_drain(src, len);
return (len);
}
/* Unmarshaling for integers */
int
evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
ev_uint32_t *pinteger)
{
ev_uint32_t tag;
ev_uint32_t len;
ev_uint32_t integer;
if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (need_tag != tag)
return (-1);
if (evtag_decode_int(&integer, evbuf) == -1)
return (-1);
len = integer;
if (EVBUFFER_LENGTH(evbuf) < len)
return (-1);
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
if (evbuffer_add(_buf, EVBUFFER_DATA(evbuf), len) == -1)
return (-1);
evbuffer_drain(evbuf, len);
return (evtag_decode_int(pinteger, _buf));
}
/* Unmarshal a fixed length tag */
int
evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
size_t len)
{
ev_uint32_t tag;
/* Initialize this event buffer so that we can read into it */
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
/* Now unmarshal a tag and check that it matches the tag we want */
if (evtag_unmarshal(src, &tag, _buf) == -1 || tag != need_tag)
return (-1);
if (EVBUFFER_LENGTH(_buf) != len)
return (-1);
memcpy(data, EVBUFFER_DATA(_buf), len);
return (0);
}
int
evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
char **pstring)
{
ev_uint32_t tag;
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
return (-1);
*pstring = calloc(EVBUFFER_LENGTH(_buf) + 1, 1);
if (*pstring == NULL)
event_err(1, "%s: calloc", __func__);
evbuffer_remove(_buf, *pstring, EVBUFFER_LENGTH(_buf));
return (0);
}
int
evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
struct timeval *ptv)
{
ev_uint32_t tag;
ev_uint32_t integer;
evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
return (-1);
if (evtag_decode_int(&integer, _buf) == -1)
return (-1);
ptv->tv_sec = integer;
if (evtag_decode_int(&integer, _buf) == -1)
return (-1);
ptv->tv_usec = integer;
return (0);
}
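/*
 * Minimal round-trip sketch (illustrative only): marshal a string under a
 * tag and read it back.  evtag_init() must have been called before any of
 * the helpers that use the shared _buf.  Names prefixed with "example_" are
 * hypothetical; the tag value 0x10 is arbitrary.
 */
#if 0
static int
example_roundtrip(void)
{
	struct evbuffer *wire = evbuffer_new();
	char *copy = NULL;
	int res = -1;

	if (wire == NULL)
		return (-1);
	evtag_init();

	evtag_marshal_string(wire, 0x10, "hello");
	if (evtag_unmarshal_string(wire, 0x10, &copy) == 0) {
		/* copy now holds a freshly allocated "hello" */
		free(copy);
		res = 0;
	}
	evbuffer_free(wire);
	return (res);
}
#endif /* usage sketch */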

View file

@ -0,0 +1,375 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVHTTP_H_
#define _EVHTTP_H_
#include "event.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
/** @file evhttp.h
*
* Basic support for HTTP serving.
*
* As libevent is a library for dealing with event notification and most
* interesting applications are networked today, I have often found the
* need to write HTTP code. The following prototypes and definitions provide
* an application with a minimal interface for making HTTP requests and for
* creating a very simple HTTP server.
*/
/* Response codes */
#define HTTP_OK 200
#define HTTP_NOCONTENT 204
#define HTTP_MOVEPERM 301
#define HTTP_MOVETEMP 302
#define HTTP_NOTMODIFIED 304
#define HTTP_BADREQUEST 400
#define HTTP_NOTFOUND 404
#define HTTP_SERVUNAVAIL 503
struct evhttp;
struct evhttp_request;
struct evkeyvalq;
/** Create a new HTTP server
*
* @param base (optional) the event base to receive the HTTP events
* @return a pointer to a newly initialized evhttp server structure
*/
struct evhttp *evhttp_new(struct event_base *base);
/**
* Binds an HTTP server on the specified address and port.
*
* Can be called multiple times to bind the same http server
* to multiple different ports.
*
* @param http a pointer to an evhttp object
* @param address a string containing the IP address to listen(2) on
* @param port the port number to listen on
* @return 0 on success, -1 on failure
* @see evhttp_free()
*/
int evhttp_bind_socket(struct evhttp *http, const char *address, u_short port);
/**
* Makes an HTTP server accept connections on the specified socket
*
* This may be useful to create a socket and then fork multiple instances
* of an http server, or when a socket has been communicated via file
 * descriptor passing in situations where an http server does not have
* permissions to bind to a low-numbered port.
*
* Can be called multiple times to have the http server listen to
* multiple different sockets.
*
* @param http a pointer to an evhttp object
* @param fd a socket fd that is ready for accepting connections
* @return 0 on success, -1 on failure.
* @see evhttp_free(), evhttp_bind_socket()
*/
int evhttp_accept_socket(struct evhttp *http, int fd);
/**
* Free the previously created HTTP server.
*
* Works only if no requests are currently being served.
*
* @param http the evhttp server object to be freed
* @see evhttp_start()
*/
void evhttp_free(struct evhttp* http);
/** Set a callback for a specified URI */
void evhttp_set_cb(struct evhttp *, const char *,
void (*)(struct evhttp_request *, void *), void *);
/** Removes the callback for a specified URI */
int evhttp_del_cb(struct evhttp *, const char *);
/** Set a callback for all requests that are not caught by specific callbacks
*/
void evhttp_set_gencb(struct evhttp *,
void (*)(struct evhttp_request *, void *), void *);
/**
* Set the timeout for an HTTP request.
*
* @param http an evhttp object
* @param timeout_in_secs the timeout, in seconds
*/
void evhttp_set_timeout(struct evhttp *, int timeout_in_secs);
/* Request/Response functionality */
/**
* Send an HTML error message to the client.
*
* @param req a request object
* @param error the HTTP error code
* @param reason a brief explanation of the error
*/
void evhttp_send_error(struct evhttp_request *req, int error,
const char *reason);
/**
* Send an HTML reply to the client.
*
* @param req a request object
* @param code the HTTP response code to send
* @param reason a brief message to send with the response code
* @param databuf the body of the response
*/
void evhttp_send_reply(struct evhttp_request *req, int code,
const char *reason, struct evbuffer *databuf);
/* Low-level response interface, for streaming/chunked replies */
void evhttp_send_reply_start(struct evhttp_request *, int, const char *);
void evhttp_send_reply_chunk(struct evhttp_request *, struct evbuffer *);
void evhttp_send_reply_end(struct evhttp_request *);
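/**
 * Minimal server sketch (illustrative only): create an evhttp server on an
 * existing event base, register a catch-all handler and enter the event
 * loop.  Names prefixed with "example_" are hypothetical; the address and
 * port are placeholders and error handling is reduced to the bare minimum.
 */
#if 0
static void
example_gencb(struct evhttp_request *req, void *arg)
{
	struct evbuffer *body = evbuffer_new();

	if (body == NULL)
		return;
	evbuffer_add(body, "hello", 5);
	evhttp_send_reply(req, HTTP_OK, "OK", body);
	evbuffer_free(body);
}

static int
example_serve(struct event_base *base)
{
	struct evhttp *http = evhttp_new(base);

	if (http == NULL)
		return (-1);
	evhttp_set_gencb(http, example_gencb, NULL);
	if (evhttp_bind_socket(http, "0.0.0.0", 8080) == -1) {
		evhttp_free(http);
		return (-1);
	}
	return (event_base_dispatch(base));
}
#endif /* usage sketch */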
/**
* Start an HTTP server on the specified address and port
*
* DEPRECATED: it does not allow an event base to be specified
*
* @param address the address to which the HTTP server should be bound
* @param port the port number on which the HTTP server should listen
 * @return a struct evhttp object
*/
struct evhttp *evhttp_start(const char *address, u_short port);
/*
* Interfaces for making requests
*/
enum evhttp_cmd_type { EVHTTP_REQ_GET, EVHTTP_REQ_POST, EVHTTP_REQ_HEAD };
enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE };
/**
* the request structure that a server receives.
* WARNING: expect this structure to change. I will try to provide
* reasonable accessors.
*/
struct evhttp_request {
#if defined(TAILQ_ENTRY)
TAILQ_ENTRY(evhttp_request) next;
#else
struct {
struct evhttp_request *tqe_next;
struct evhttp_request **tqe_prev;
} next;
#endif
/* the connection object that this request belongs to */
struct evhttp_connection *evcon;
int flags;
#define EVHTTP_REQ_OWN_CONNECTION 0x0001
#define EVHTTP_PROXY_REQUEST 0x0002
struct evkeyvalq *input_headers;
struct evkeyvalq *output_headers;
/* address of the remote host and the port connection came from */
char *remote_host;
u_short remote_port;
enum evhttp_request_kind kind;
enum evhttp_cmd_type type;
char *uri; /* uri after HTTP request was parsed */
char major; /* HTTP Major number */
char minor; /* HTTP Minor number */
int response_code; /* HTTP Response code */
char *response_code_line; /* Readable response */
struct evbuffer *input_buffer; /* read data */
ev_int64_t ntoread;
int chunked:1, /* a chunked request */
userdone:1; /* the user has sent all data */
struct evbuffer *output_buffer; /* outgoing post or data */
/* Callback */
void (*cb)(struct evhttp_request *, void *);
void *cb_arg;
/*
* Chunked data callback - call for each completed chunk if
* specified. If not specified, all the data is delivered via
* the regular callback.
*/
void (*chunk_cb)(struct evhttp_request *, void *);
};
/**
* Creates a new request object that needs to be filled in with the request
* parameters. The callback is executed when the request completed or an
* error occurred.
*/
struct evhttp_request *evhttp_request_new(
void (*cb)(struct evhttp_request *, void *), void *arg);
/** enable delivery of chunks to requestor */
void evhttp_request_set_chunked_cb(struct evhttp_request *,
void (*cb)(struct evhttp_request *, void *));
/** Frees the request object and removes associated events. */
void evhttp_request_free(struct evhttp_request *req);
/** Returns the connection object associated with the request or NULL */
struct evhttp_connection *evhttp_request_get_connection(struct evhttp_request *req);
/**
 * A connection object that can be used for making HTTP requests. The
* connection object tries to establish the connection when it is given an
* http request object.
*/
struct evhttp_connection *evhttp_connection_new(
const char *address, unsigned short port);
/** Frees an http connection */
void evhttp_connection_free(struct evhttp_connection *evcon);
/** sets the ip address from which http connections are made */
void evhttp_connection_set_local_address(struct evhttp_connection *evcon,
const char *address);
/** sets the local port from which http connections are made */
void evhttp_connection_set_local_port(struct evhttp_connection *evcon,
unsigned short port);
/** Sets the timeout for events related to this connection */
void evhttp_connection_set_timeout(struct evhttp_connection *evcon,
int timeout_in_secs);
/** Sets the retry limit for this connection - -1 repeats indefinitely */
void evhttp_connection_set_retries(struct evhttp_connection *evcon,
int retry_max);
/** Set a callback for connection close. */
void evhttp_connection_set_closecb(struct evhttp_connection *evcon,
void (*)(struct evhttp_connection *, void *), void *);
/**
* Associates an event base with the connection - can only be called
* on a freshly created connection object that has not been used yet.
*/
void evhttp_connection_set_base(struct evhttp_connection *evcon,
struct event_base *base);
/** Get the remote address and port associated with this connection. */
void evhttp_connection_get_peer(struct evhttp_connection *evcon,
char **address, u_short *port);
/** The connection gets ownership of the request */
int evhttp_make_request(struct evhttp_connection *evcon,
struct evhttp_request *req,
enum evhttp_cmd_type type, const char *uri);
const char *evhttp_request_uri(struct evhttp_request *req);
/* Interfaces for dealing with HTTP headers */
const char *evhttp_find_header(const struct evkeyvalq *, const char *);
int evhttp_remove_header(struct evkeyvalq *, const char *);
int evhttp_add_header(struct evkeyvalq *, const char *, const char *);
void evhttp_clear_headers(struct evkeyvalq *);
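/**
 * Minimal client sketch (illustrative only): issue a GET request over a new
 * connection.  The connection takes ownership of the request once
 * evhttp_make_request() succeeds, and the callback receives the finished
 * request (or NULL on failure).  Names prefixed with "example_" are
 * hypothetical; the address and port are placeholders.
 */
#if 0
static void
example_request_done(struct evhttp_request *req, void *arg)
{
	if (req == NULL || req->response_code != HTTP_OK)
		return;	/* request failed or the server reported an error */
	/* req->input_buffer holds the response body at this point */
}

static int
example_fetch(struct event_base *base)
{
	struct evhttp_connection *evcon;
	struct evhttp_request *req;

	if ((evcon = evhttp_connection_new("127.0.0.1", 8080)) == NULL)
		return (-1);
	evhttp_connection_set_base(evcon, base);
	if ((req = evhttp_request_new(example_request_done, NULL)) == NULL) {
		evhttp_connection_free(evcon);
		return (-1);
	}
	evhttp_add_header(req->output_headers, "Host", "127.0.0.1");
	return (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/"));
}
#endif /* usage sketch */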
/* Miscellaneous utility functions */
/**
Helper function to encode a URI.
The returned string must be freed by the caller.
@param uri an unencoded URI
@return a newly allocated URI-encoded string
*/
char *evhttp_encode_uri(const char *uri);
/**
Helper function to decode a URI.
The returned string must be freed by the caller.
@param uri an encoded URI
@return a newly allocated unencoded URI
*/
char *evhttp_decode_uri(const char *uri);
/**
* Helper function to parse out arguments in a query.
*
* Parsing a uri like
*
* http://foo.com/?q=test&s=some+thing
*
* will result in two entries in the key value queue.
* The first entry is: key="q", value="test"
* The second entry is: key="s", value="some thing"
*
* @param uri the request URI
* @param headers the head of the evkeyval queue
*/
void evhttp_parse_query(const char *uri, struct evkeyvalq *headers);
/**
* Escape HTML character entities in a string.
*
* Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
* &#039; and &amp; correspondingly.
*
* The returned string needs to be freed by the caller.
*
* @param html an unescaped HTML string
* @return an escaped HTML string
*/
char *evhttp_htmlescape(const char *html);
#ifdef __cplusplus
}
#endif
#endif /* _EVHTTP_H_ */

View file

@ -0,0 +1,517 @@
/*
* Submitted by David Pacheco (dp.spambait@gmail.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2007 Sun Microsystems. All rights reserved.
* Use is subject to license terms.
*/
/*
* evport.c: event backend using Solaris 10 event ports. See port_create(3C).
* This implementation is loosely modeled after the one used for select(2) (in
* select.c).
*
* The outstanding events are tracked in a data structure called evport_data.
* Each entry in the ed_fds array corresponds to a file descriptor, and contains
* pointers to the read and write events that correspond to that fd. (That is,
* when the file is readable, the "read" event should handle it, etc.)
*
* evport_add and evport_del update this data structure. evport_dispatch uses it
* to determine where to callback when an event occurs (which it gets from
* port_getn).
*
* Helper functions are used: grow() grows the file descriptor array as
* necessary when large fd's come in. reassociate() takes care of maintaining
* the proper file-descriptor/event-port associations.
*
* As in the select(2) implementation, signals are handled by evsignal.
*/
#include "config.h"
#include <sys/time.h>
#include <assert.h>
#include <sys/queue.h>
#include <errno.h>
#include <poll.h>
#include <port.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "log.h"
#include "evsignal.h"
/*
* Default value for ed_nevents, which is the maximum file descriptor number we
* can handle. If an event comes in for a file descriptor F > nevents, we will
* grow the array of file descriptors, doubling its size.
*/
#define DEFAULT_NFDS 16
/*
* EVENTS_PER_GETN is the maximum number of events to retrieve from port_getn on
* any particular call. You can speed things up by increasing this, but it will
* (obviously) require more memory.
*/
#define EVENTS_PER_GETN 8
/*
* Per-file-descriptor information about what events we're subscribed to. These
* fields are NULL if no event is subscribed to either of them.
*/
struct fd_info {
struct event* fdi_revt; /* the event responsible for the "read" */
struct event* fdi_wevt; /* the event responsible for the "write" */
};
#define FDI_HAS_READ(fdi) ((fdi)->fdi_revt != NULL)
#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_wevt != NULL)
#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
(FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
struct evport_data {
int ed_port; /* event port for system events */
int ed_nevents; /* number of allocated fdi's */
struct fd_info *ed_fds; /* allocated fdi table */
/* fdi's that we need to reassoc */
int ed_pending[EVENTS_PER_GETN]; /* fd's with pending events */
};
static void* evport_init (struct event_base *);
static int evport_add (void *, struct event *);
static int evport_del (void *, struct event *);
static int evport_dispatch (struct event_base *, void *, struct timeval *);
static void evport_dealloc (struct event_base *, void *);
const struct eventop evportops = {
"evport",
evport_init,
evport_add,
evport_del,
evport_dispatch,
evport_dealloc,
1 /* need reinit */
};
/*
* Initialize the event port implementation.
*/
static void*
evport_init(struct event_base *base)
{
struct evport_data *evpd;
int i;
/*
* Disable event ports when this environment variable is set
*/
if (evutil_getenv("EVENT_NOEVPORT"))
return (NULL);
if (!(evpd = calloc(1, sizeof(struct evport_data))))
return (NULL);
if ((evpd->ed_port = port_create()) == -1) {
free(evpd);
return (NULL);
}
/*
* Initialize file descriptor structure
*/
evpd->ed_fds = calloc(DEFAULT_NFDS, sizeof(struct fd_info));
if (evpd->ed_fds == NULL) {
close(evpd->ed_port);
free(evpd);
return (NULL);
}
evpd->ed_nevents = DEFAULT_NFDS;
for (i = 0; i < EVENTS_PER_GETN; i++)
evpd->ed_pending[i] = -1;
evsignal_init(base);
return (evpd);
}
#ifdef CHECK_INVARIANTS
/*
* Checks some basic properties about the evport_data structure. Because it
* checks all file descriptors, this function can be expensive when the maximum
* file descriptor ever used is rather large.
*/
static void
check_evportop(struct evport_data *evpd)
{
assert(evpd);
assert(evpd->ed_nevents > 0);
assert(evpd->ed_port > 0);
assert(evpd->ed_fds > 0);
/*
* Verify the integrity of the fd_info struct as well as the events to
* which it points (at least, that they're valid references and correct
* for their position in the structure).
*/
int i;
for (i = 0; i < evpd->ed_nevents; ++i) {
struct event *ev;
struct fd_info *fdi;
fdi = &evpd->ed_fds[i];
if ((ev = fdi->fdi_revt) != NULL) {
assert(ev->ev_fd == i);
}
if ((ev = fdi->fdi_wevt) != NULL) {
assert(ev->ev_fd == i);
}
}
}
/*
* Verifies very basic integrity of a given port_event.
*/
static void
check_event(port_event_t* pevt)
{
/*
* We've only registered for PORT_SOURCE_FD events. The only
* other thing we can legitimately receive is PORT_SOURCE_ALERT,
* but since we're not using port_alert either, we can assume
* PORT_SOURCE_FD.
*/
assert(pevt->portev_source == PORT_SOURCE_FD);
assert(pevt->portev_user == NULL);
}
#else
#define check_evportop(epop)
#define check_event(pevt)
#endif /* CHECK_INVARIANTS */
/*
* Doubles the size of the allocated file descriptor array.
*/
static int
grow(struct evport_data *epdp, int factor)
{
struct fd_info *tmp;
int oldsize = epdp->ed_nevents;
int newsize = factor * oldsize;
assert(factor > 1);
check_evportop(epdp);
tmp = realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
if (NULL == tmp)
return -1;
epdp->ed_fds = tmp;
memset((char*) (epdp->ed_fds + oldsize), 0,
(newsize - oldsize)*sizeof(struct fd_info));
epdp->ed_nevents = newsize;
check_evportop(epdp);
return 0;
}
/*
* (Re)associates the given file descriptor with the event port. The OS events
* are specified (implicitly) from the fd_info struct.
*/
static int
reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
{
int sysevents = FDI_TO_SYSEVENTS(fdip);
if (sysevents != 0) {
if (port_associate(epdp->ed_port, PORT_SOURCE_FD,
fd, sysevents, NULL) == -1) {
event_warn("port_associate");
return (-1);
}
}
check_evportop(epdp);
return (0);
}
/*
* Main event loop - polls port_getn for some number of events, and processes
* them.
*/
static int
evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
int i, res;
struct evport_data *epdp = arg;
port_event_t pevtlist[EVENTS_PER_GETN];
/*
* port_getn will block until it has at least nevents events. It will
* also return how many it's given us (which may be more than we asked
* for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
* nevents.
*/
int nevents = 1;
/*
* We have to convert a struct timeval to a struct timespec
* (only difference is nanoseconds vs. microseconds). If no time-based
* events are active, we should wait for I/O (and tv == NULL).
*/
struct timespec ts;
struct timespec *ts_p = NULL;
if (tv != NULL) {
ts.tv_sec = tv->tv_sec;
ts.tv_nsec = tv->tv_usec * 1000;
ts_p = &ts;
}
/*
* Before doing anything else, we need to reassociate the events we hit
* last time which need reassociation. See comment at the end of the
* loop below.
*/
for (i = 0; i < EVENTS_PER_GETN; ++i) {
struct fd_info *fdi = NULL;
if (epdp->ed_pending[i] != -1) {
fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
}
if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
int fd = FDI_HAS_READ(fdi) ? fdi->fdi_revt->ev_fd :
fdi->fdi_wevt->ev_fd;
reassociate(epdp, fdi, fd);
epdp->ed_pending[i] = -1;
}
}
if ((res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
(unsigned int *) &nevents, ts_p)) == -1) {
if (errno == EINTR || errno == EAGAIN) {
evsignal_process(base);
return (0);
} else if (errno == ETIME) {
if (nevents == 0)
return (0);
} else {
event_warn("port_getn");
return (-1);
}
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: port_getn reports %d events", __func__, nevents));
for (i = 0; i < nevents; ++i) {
struct event *ev;
struct fd_info *fdi;
port_event_t *pevt = &pevtlist[i];
int fd = (int) pevt->portev_object;
check_evportop(epdp);
check_event(pevt);
epdp->ed_pending[i] = fd;
/*
* Figure out what kind of event it was
* (because we have to pass this to the callback)
*/
res = 0;
if (pevt->portev_events & POLLIN)
res |= EV_READ;
if (pevt->portev_events & POLLOUT)
res |= EV_WRITE;
/*
* Check for the error situations or a hangup situation
*/
if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
res |= EV_READ|EV_WRITE;
assert(epdp->ed_nevents > fd);
fdi = &(epdp->ed_fds[fd]);
/*
* We now check for each of the possible events (READ
* or WRITE). Then, we activate the event (which will
* cause its callback to be executed).
*/
if ((res & EV_READ) && ((ev = fdi->fdi_revt) != NULL)) {
event_active(ev, res, 1);
}
if ((res & EV_WRITE) && ((ev = fdi->fdi_wevt) != NULL)) {
event_active(ev, res, 1);
}
} /* end of all events gotten */
check_evportop(epdp);
return (0);
}
/*
* Adds the given event (so that you will be notified when it happens via
* the callback function).
*/
static int
evport_add(void *arg, struct event *ev)
{
struct evport_data *evpd = arg;
struct fd_info *fdi;
int factor;
check_evportop(evpd);
/*
* Delegate, if it's not ours to handle.
*/
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
/*
* If necessary, grow the file descriptor info table
*/
factor = 1;
while (ev->ev_fd >= factor * evpd->ed_nevents)
factor *= 2;
if (factor > 1) {
if (-1 == grow(evpd, factor)) {
return (-1);
}
}
fdi = &evpd->ed_fds[ev->ev_fd];
if (ev->ev_events & EV_READ)
fdi->fdi_revt = ev;
if (ev->ev_events & EV_WRITE)
fdi->fdi_wevt = ev;
return reassociate(evpd, fdi, ev->ev_fd);
}
/*
* Removes the given event from the list of events to wait for.
*/
static int
evport_del(void *arg, struct event *ev)
{
struct evport_data *evpd = arg;
struct fd_info *fdi;
int i;
int associated = 1;
check_evportop(evpd);
/*
* Delegate, if it's not ours to handle
*/
if (ev->ev_events & EV_SIGNAL) {
return (evsignal_del(ev));
}
if (evpd->ed_nevents < ev->ev_fd) {
return (-1);
}
for (i = 0; i < EVENTS_PER_GETN; ++i) {
if (evpd->ed_pending[i] == ev->ev_fd) {
associated = 0;
break;
}
}
fdi = &evpd->ed_fds[ev->ev_fd];
if (ev->ev_events & EV_READ)
fdi->fdi_revt = NULL;
if (ev->ev_events & EV_WRITE)
fdi->fdi_wevt = NULL;
if (associated) {
if (!FDI_HAS_EVENTS(fdi) &&
port_dissociate(evpd->ed_port, PORT_SOURCE_FD,
ev->ev_fd) == -1) {
/*
			 * Ignore the EBADFD error; the fd could have been
			 * closed before event_del() was called.
*/
if (errno != EBADFD) {
event_warn("port_dissociate");
return (-1);
}
} else {
if (FDI_HAS_EVENTS(fdi)) {
return (reassociate(evpd, fdi, ev->ev_fd));
}
}
} else {
if (fdi->fdi_revt == NULL && fdi->fdi_wevt == NULL) {
evpd->ed_pending[i] = -1;
}
}
return 0;
}
static void
evport_dealloc(struct event_base *base, void *arg)
{
struct evport_data *evpd = arg;
evsignal_dealloc(base);
close(evpd->ed_port);
if (evpd->ed_fds)
free(evpd->ed_fds);
free(evpd);
}

View file

@ -0,0 +1,87 @@
/*
* Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVRPC_INTERNAL_H_
#define _EVRPC_INTERNAL_H_
#include "http-internal.h"
struct evrpc;
#define EVRPC_URI_PREFIX "/.rpc."
struct evrpc_hook {
TAILQ_ENTRY(evrpc_hook) (next);
	/* returns -1 if the rpc should be aborted; may rewrite the buffer */
int (*process)(struct evhttp_request *, struct evbuffer *, void *);
void *process_arg;
};
TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
/*
* this is shared between the base and the pool, so that we can reuse
* the hook adding functions; we alias both evrpc_pool and evrpc_base
* to this common structure.
*/
struct _evrpc_hooks {
/* hooks for processing outbound and inbound rpcs */
struct evrpc_hook_list in_hooks;
struct evrpc_hook_list out_hooks;
};
#define input_hooks common.in_hooks
#define output_hooks common.out_hooks
struct evrpc_base {
struct _evrpc_hooks common;
/* the HTTP server under which we register our RPC calls */
struct evhttp* http_server;
/* a list of all RPCs registered with us */
TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
};
struct evrpc_req_generic;
void evrpc_reqstate_free(struct evrpc_req_generic* rpc_state);
/* A pool for holding evhttp_connection objects */
struct evrpc_pool {
struct _evrpc_hooks common;
struct event_base *base;
struct evconq connections;
int timeout;
TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) requests;
};
#endif /* _EVRPC_INTERNAL_H_ */

View file

@ -0,0 +1,656 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifndef WIN32
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include "event.h"
#include "evrpc.h"
#include "evrpc-internal.h"
#include "evhttp.h"
#include "evutil.h"
#include "log.h"
struct evrpc_base *
evrpc_init(struct evhttp *http_server)
{
struct evrpc_base* base = calloc(1, sizeof(struct evrpc_base));
if (base == NULL)
return (NULL);
/* we rely on the tagging sub system */
evtag_init();
TAILQ_INIT(&base->registered_rpcs);
TAILQ_INIT(&base->input_hooks);
TAILQ_INIT(&base->output_hooks);
base->http_server = http_server;
return (base);
}
void
evrpc_free(struct evrpc_base *base)
{
struct evrpc *rpc;
struct evrpc_hook *hook;
while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) {
assert(evrpc_unregister_rpc(base, rpc->uri));
}
while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) {
assert(evrpc_remove_hook(base, EVRPC_INPUT, hook));
}
while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) {
assert(evrpc_remove_hook(base, EVRPC_OUTPUT, hook));
}
free(base);
}
void *
evrpc_add_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hook_list *head = NULL;
struct evrpc_hook *hook = NULL;
switch (hook_type) {
case EVRPC_INPUT:
head = &base->in_hooks;
break;
case EVRPC_OUTPUT:
head = &base->out_hooks;
break;
default:
assert(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
}
hook = calloc(1, sizeof(struct evrpc_hook));
assert(hook != NULL);
hook->process = cb;
hook->process_arg = cb_arg;
TAILQ_INSERT_TAIL(head, hook, next);
return (hook);
}
static int
evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
{
struct evrpc_hook *hook = NULL;
TAILQ_FOREACH(hook, head, next) {
if (hook == handle) {
TAILQ_REMOVE(head, hook, next);
free(hook);
return (1);
}
}
return (0);
}
/*
* remove the hook specified by the handle
*/
int
evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
{
struct _evrpc_hooks *base = vbase;
struct evrpc_hook_list *head = NULL;
switch (hook_type) {
case EVRPC_INPUT:
head = &base->in_hooks;
break;
case EVRPC_OUTPUT:
head = &base->out_hooks;
break;
default:
assert(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
}
return (evrpc_remove_hook_internal(head, handle));
}
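/*
 * Minimal hook sketch (illustrative only): attach an input hook to an rpc
 * base.  The hook may inspect or rewrite the buffer and returns -1 to abort
 * processing of the request.  Names prefixed with "example_" are
 * hypothetical.
 */
#if 0
static int
example_input_hook(struct evhttp_request *req, struct evbuffer *buf,
    void *arg)
{
	/* reject anything without a payload */
	if (EVBUFFER_LENGTH(buf) == 0)
		return (-1);
	return (0);
}

static void *
example_install_hook(struct evrpc_base *base)
{
	/* the returned handle can later be passed to evrpc_remove_hook() */
	return (evrpc_add_hook(base, EVRPC_INPUT, example_input_hook, NULL));
}
#endif /* usage sketch */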
static int
evrpc_process_hooks(struct evrpc_hook_list *head,
struct evhttp_request *req, struct evbuffer *evbuf)
{
struct evrpc_hook *hook;
TAILQ_FOREACH(hook, head, next) {
if (hook->process(req, evbuf, hook->process_arg) == -1)
return (-1);
}
return (0);
}
static void evrpc_pool_schedule(struct evrpc_pool *pool);
static void evrpc_request_cb(struct evhttp_request *, void *);
void evrpc_request_done(struct evrpc_req_generic*);
/*
* Registers a new RPC with the HTTP server. The evrpc object is expected
* to have been filled in via the EVRPC_REGISTER_OBJECT macro which in turn
* calls this function.
*/
static char *
evrpc_construct_uri(const char *uri)
{
char *constructed_uri;
int constructed_uri_len;
constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1;
if ((constructed_uri = malloc(constructed_uri_len)) == NULL)
event_err(1, "%s: failed to register rpc at %s",
__func__, uri);
memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX));
memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri));
constructed_uri[constructed_uri_len - 1] = '\0';
return (constructed_uri);
}
int
evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc,
void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg)
{
char *constructed_uri = evrpc_construct_uri(rpc->uri);
rpc->base = base;
rpc->cb = cb;
rpc->cb_arg = cb_arg;
TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next);
evhttp_set_cb(base->http_server,
constructed_uri,
evrpc_request_cb,
rpc);
free(constructed_uri);
return (0);
}
int
evrpc_unregister_rpc(struct evrpc_base *base, const char *name)
{
char *registered_uri = NULL;
struct evrpc *rpc;
/* find the right rpc; linear search might be slow */
TAILQ_FOREACH(rpc, &base->registered_rpcs, next) {
if (strcmp(rpc->uri, name) == 0)
break;
}
if (rpc == NULL) {
/* We did not find an RPC with this name */
return (-1);
}
TAILQ_REMOVE(&base->registered_rpcs, rpc, next);
free((char *)rpc->uri);
free(rpc);
registered_uri = evrpc_construct_uri(name);
/* remove the http server callback */
assert(evhttp_del_cb(base->http_server, registered_uri) == 0);
free(registered_uri);
return (0);
}
static void
evrpc_request_cb(struct evhttp_request *req, void *arg)
{
struct evrpc *rpc = arg;
struct evrpc_req_generic *rpc_state = NULL;
/* let's verify the outside parameters */
if (req->type != EVHTTP_REQ_POST ||
EVBUFFER_LENGTH(req->input_buffer) <= 0)
goto error;
/*
* we might want to allow hooks to suspend the processing,
* but at the moment, we assume that they just act as simple
* filters.
*/
if (evrpc_process_hooks(&rpc->base->input_hooks,
req, req->input_buffer) == -1)
goto error;
rpc_state = calloc(1, sizeof(struct evrpc_req_generic));
if (rpc_state == NULL)
goto error;
/* let's check that we can parse the request */
rpc_state->request = rpc->request_new();
if (rpc_state->request == NULL)
goto error;
rpc_state->rpc = rpc;
if (rpc->request_unmarshal(
rpc_state->request, req->input_buffer) == -1) {
/* we failed to parse the request; that's a bummer */
goto error;
}
/* at this point, we have a well formed request, prepare the reply */
rpc_state->reply = rpc->reply_new();
if (rpc_state->reply == NULL)
goto error;
rpc_state->http_req = req;
rpc_state->done = evrpc_request_done;
/* give the rpc to the user; they can deal with it */
rpc->cb(rpc_state, rpc->cb_arg);
return;
error:
evrpc_reqstate_free(rpc_state);
evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
return;
}
void
evrpc_reqstate_free(struct evrpc_req_generic* rpc_state)
{
/* clean up all memory */
if (rpc_state != NULL) {
struct evrpc *rpc = rpc_state->rpc;
if (rpc_state->request != NULL)
rpc->request_free(rpc_state->request);
if (rpc_state->reply != NULL)
rpc->reply_free(rpc_state->reply);
free(rpc_state);
}
}
void
evrpc_request_done(struct evrpc_req_generic* rpc_state)
{
struct evhttp_request *req = rpc_state->http_req;
struct evrpc *rpc = rpc_state->rpc;
struct evbuffer* data = NULL;
if (rpc->reply_complete(rpc_state->reply) == -1) {
/* the reply was not completely filled in. error out */
goto error;
}
if ((data = evbuffer_new()) == NULL) {
/* out of memory */
goto error;
}
/* serialize the reply */
rpc->reply_marshal(data, rpc_state->reply);
/* do hook based tweaks to the request */
if (evrpc_process_hooks(&rpc->base->output_hooks,
req, data) == -1)
goto error;
/* on success, we are going to transmit marshaled binary data */
if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) {
evhttp_add_header(req->output_headers,
"Content-Type", "application/octet-stream");
}
evhttp_send_reply(req, HTTP_OK, "OK", data);
evbuffer_free(data);
evrpc_reqstate_free(rpc_state);
return;
error:
if (data != NULL)
evbuffer_free(data);
evrpc_reqstate_free(rpc_state);
evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
return;
}
/* Client implementation of RPC site */
static int evrpc_schedule_request(struct evhttp_connection *connection,
struct evrpc_request_wrapper *ctx);
struct evrpc_pool *
evrpc_pool_new(struct event_base *base)
{
struct evrpc_pool *pool = calloc(1, sizeof(struct evrpc_pool));
if (pool == NULL)
return (NULL);
TAILQ_INIT(&pool->connections);
TAILQ_INIT(&pool->requests);
TAILQ_INIT(&pool->input_hooks);
TAILQ_INIT(&pool->output_hooks);
pool->base = base;
pool->timeout = -1;
return (pool);
}
static void
evrpc_request_wrapper_free(struct evrpc_request_wrapper *request)
{
free(request->name);
free(request);
}
void
evrpc_pool_free(struct evrpc_pool *pool)
{
struct evhttp_connection *connection;
struct evrpc_request_wrapper *request;
struct evrpc_hook *hook;
while ((request = TAILQ_FIRST(&pool->requests)) != NULL) {
TAILQ_REMOVE(&pool->requests, request, next);
/* if this gets more complicated we need our own function */
evrpc_request_wrapper_free(request);
}
while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) {
TAILQ_REMOVE(&pool->connections, connection, next);
evhttp_connection_free(connection);
}
while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) {
assert(evrpc_remove_hook(pool, EVRPC_INPUT, hook));
}
while ((hook = TAILQ_FIRST(&pool->output_hooks)) != NULL) {
assert(evrpc_remove_hook(pool, EVRPC_OUTPUT, hook));
}
free(pool);
}
/*
* Add a connection to the RPC pool. A request scheduled on the pool
* may use any available connection.
*/
void
evrpc_pool_add_connection(struct evrpc_pool *pool,
struct evhttp_connection *connection) {
assert(connection->http_server == NULL);
TAILQ_INSERT_TAIL(&pool->connections, connection, next);
/*
* associate an event base with this connection
*/
if (pool->base != NULL)
evhttp_connection_set_base(connection, pool->base);
/*
* unless a timeout was specifically set for a connection,
* the connection inherits the timeout from the pool.
*/
if (connection->timeout == -1)
connection->timeout = pool->timeout;
/*
	 * if we have any requests pending, schedule them on the new
	 * connection.
*/
if (TAILQ_FIRST(&pool->requests) != NULL) {
struct evrpc_request_wrapper *request =
TAILQ_FIRST(&pool->requests);
TAILQ_REMOVE(&pool->requests, request, next);
evrpc_schedule_request(connection, request);
}
}
void
evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs)
{
struct evhttp_connection *evcon;
TAILQ_FOREACH(evcon, &pool->connections, next) {
evcon->timeout = timeout_in_secs;
}
pool->timeout = timeout_in_secs;
}
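/*
 * Minimal pool sketch (illustrative only): create a pool on an event base,
 * hand it one connection and give every rpc a ten second deadline.  The
 * pool takes ownership of the connection.  Names prefixed with "example_"
 * are hypothetical; the address and port are placeholders.
 */
#if 0
static struct evrpc_pool *
example_pool_setup(struct event_base *base)
{
	struct evrpc_pool *pool = evrpc_pool_new(base);
	struct evhttp_connection *evcon;

	if (pool == NULL)
		return (NULL);
	if ((evcon = evhttp_connection_new("127.0.0.1", 8080)) == NULL) {
		evrpc_pool_free(pool);
		return (NULL);
	}
	evrpc_pool_add_connection(pool, evcon);
	evrpc_pool_set_timeout(pool, 10);
	return (pool);
}
#endif /* usage sketch */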
static void evrpc_reply_done(struct evhttp_request *, void *);
static void evrpc_request_timeout(int, short, void *);
/*
* Finds a connection object associated with the pool that is currently
* idle and can be used to make a request.
*/
static struct evhttp_connection *
evrpc_pool_find_connection(struct evrpc_pool *pool)
{
struct evhttp_connection *connection;
TAILQ_FOREACH(connection, &pool->connections, next) {
if (TAILQ_FIRST(&connection->requests) == NULL)
return (connection);
}
return (NULL);
}
/*
* We assume that the ctx is no longer queued on the pool.
*/
static int
evrpc_schedule_request(struct evhttp_connection *connection,
struct evrpc_request_wrapper *ctx)
{
struct evhttp_request *req = NULL;
struct evrpc_pool *pool = ctx->pool;
struct evrpc_status status;
char *uri = NULL;
int res = 0;
if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL)
goto error;
/* serialize the request data into the output buffer */
ctx->request_marshal(req->output_buffer, ctx->request);
uri = evrpc_construct_uri(ctx->name);
if (uri == NULL)
goto error;
/* we need to know the connection that we might have to abort */
ctx->evcon = connection;
/* apply hooks to the outgoing request */
if (evrpc_process_hooks(&pool->output_hooks,
req, req->output_buffer) == -1)
goto error;
if (pool->timeout > 0) {
/*
* a timeout after which the whole rpc is going to be aborted.
*/
struct timeval tv;
evutil_timerclear(&tv);
tv.tv_sec = pool->timeout;
evtimer_add(&ctx->ev_timeout, &tv);
}
/* start the request over the connection */
res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri);
free(uri);
if (res == -1)
goto error;
return (0);
error:
memset(&status, 0, sizeof(status));
status.error = EVRPC_STATUS_ERR_UNSTARTED;
(*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
evrpc_request_wrapper_free(ctx);
return (-1);
}
int
evrpc_make_request(struct evrpc_request_wrapper *ctx)
{
struct evrpc_pool *pool = ctx->pool;
/* initialize the event structure for this rpc */
evtimer_set(&ctx->ev_timeout, evrpc_request_timeout, ctx);
if (pool->base != NULL)
event_base_set(pool->base, &ctx->ev_timeout);
/* we had better have at least one connection in the pool */
assert(TAILQ_FIRST(&pool->connections) != NULL);
/*
* if no connection is currently idle, the request stays queued on the
* pool; the next time a connection becomes idle, the rpc will be sent
* over it.
*/
TAILQ_INSERT_TAIL(&pool->requests, ctx, next);
evrpc_pool_schedule(pool);
return (0);
}
static void
evrpc_reply_done(struct evhttp_request *req, void *arg)
{
struct evrpc_request_wrapper *ctx = arg;
struct evrpc_pool *pool = ctx->pool;
struct evrpc_status status;
int res = -1;
/* cancel any timeout we might have scheduled */
event_del(&ctx->ev_timeout);
memset(&status, 0, sizeof(status));
status.http_req = req;
/* we need to get the reply now */
if (req != NULL) {
/* apply hooks to the incoming request */
if (evrpc_process_hooks(&pool->input_hooks,
req, req->input_buffer) == -1) {
status.error = EVRPC_STATUS_ERR_HOOKABORTED;
res = -1;
} else {
res = ctx->reply_unmarshal(ctx->reply,
req->input_buffer);
if (res == -1) {
status.error = EVRPC_STATUS_ERR_BADPAYLOAD;
}
}
} else {
status.error = EVRPC_STATUS_ERR_TIMEOUT;
}
if (res == -1) {
/* clear everything that we might have written previously */
ctx->reply_clear(ctx->reply);
}
(*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
evrpc_request_wrapper_free(ctx);
/* the http layer owns the request structure */
/* see if we can schedule another request */
evrpc_pool_schedule(pool);
}
static void
evrpc_pool_schedule(struct evrpc_pool *pool)
{
struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests);
struct evhttp_connection *evcon;
/* if no requests are pending, we have no work */
if (ctx == NULL)
return;
if ((evcon = evrpc_pool_find_connection(pool)) != NULL) {
TAILQ_REMOVE(&pool->requests, ctx, next);
evrpc_schedule_request(evcon, ctx);
}
}
static void
evrpc_request_timeout(int fd, short what, void *arg)
{
struct evrpc_request_wrapper *ctx = arg;
struct evhttp_connection *evcon = ctx->evcon;
assert(evcon != NULL);
evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
}

View file

@ -0,0 +1,486 @@
/*
* Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVRPC_H_
#define _EVRPC_H_
#ifdef __cplusplus
extern "C" {
#endif
/** @file evrpc.h
*
* This header file provides basic support for an RPC server and client.
*
* To support RPCs in a server, every supported RPC command needs to be
* defined and registered.
*
* EVRPC_HEADER(SendCommand, Request, Reply);
*
* SendCommand is the name of the RPC command.
* Request is the name of a structure generated by event_rpcgen.py.
* It contains all parameters relating to the SendCommand RPC. The
* server needs to fill in the Reply structure.
* Reply is the name of a structure generated by event_rpcgen.py. It
* contains the answer to the RPC.
*
* To register an RPC with an HTTP server, you need to first create an RPC
* base with:
*
* struct evrpc_base *base = evrpc_init(http);
*
* A specific RPC can then be registered with
*
* EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg);
*
* When the server receives an appropriately formatted RPC, the user callback
* is invoked. The callback needs to fill in the reply structure.
*
* void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg);
*
* To send the reply, call EVRPC_REQUEST_DONE(rpc);
*
* See the regression test for an example.
*/
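/*
* Illustrative sketch (not part of the original sources): the steps above
* combined on the server side. SendCommand, Request, Reply and FunctionCB
* are the placeholder names used in this comment; the Request/Reply helper
* functions are assumed to be generated by event_rpcgen.py, and
* evhttp_start() is assumed from the evhttp API shipped with this library.
*
*    EVRPC_HEADER(SendCommand, Request, Reply);
*
*    void
*    FunctionCB(EVRPC_STRUCT(SendCommand) *rpc, void *arg)
*    {
*            ... inspect rpc->request, fill in rpc->reply ...
*            EVRPC_REQUEST_DONE(rpc);
*    }
*
*    struct evhttp *http = evhttp_start("127.0.0.1", 8080);
*    struct evrpc_base *base = evrpc_init(http);
*    EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, NULL);
*/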
struct evbuffer;
struct event_base;
struct evrpc_req_generic;
/* Encapsulates a request */
struct evrpc {
TAILQ_ENTRY(evrpc) next;
/* the URI at which the request handler lives */
const char* uri;
/* creates a new request structure */
void *(*request_new)(void);
/* frees the request structure */
void (*request_free)(void *);
/* unmarshals the buffer into the proper request structure */
int (*request_unmarshal)(void *, struct evbuffer *);
/* creates a new reply structure */
void *(*reply_new)(void);
/* frees the reply structure */
void (*reply_free)(void *);
/* verifies that the reply is valid */
int (*reply_complete)(void *);
/* marshals the reply into a buffer */
void (*reply_marshal)(struct evbuffer*, void *);
/* the callback invoked for each received rpc */
void (*cb)(struct evrpc_req_generic *, void *);
void *cb_arg;
/* reference for further configuration */
struct evrpc_base *base;
};
/** The type of a specific RPC Message
*
* @param rpcname the name of the RPC message
*/
#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname
struct evhttp_request;
struct evrpc_status;
/* We alias the RPC specific structs to this voided one */
struct evrpc_req_generic {
/* the unmarshaled request object */
void *request;
/* the empty reply object that needs to be filled in */
void *reply;
/*
* the static structure for this rpc; that can be used to
* automatically unmarshal and marshal the http buffers.
*/
struct evrpc *rpc;
/*
* the http request structure on which we need to answer.
*/
struct evhttp_request* http_req;
/*
* callback to reply and finish answering this rpc
*/
void (*done)(struct evrpc_req_generic* rpc);
};
/** Creates the definitions and prototypes for an RPC
*
* You need to use EVRPC_HEADER to create structures and function prototypes
* needed by the server and client implementation. The structures have to be
* defined in an .rpc file and converted to source code via event_rpcgen.py
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @see EVRPC_GENERATE()
*/
#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \
EVRPC_STRUCT(rpcname) { \
struct reqstruct* request; \
struct rplystruct* reply; \
struct evrpc* rpc; \
struct evhttp_request* http_req; \
void (*done)(struct evrpc_status *, \
struct evrpc* rpc, void *request, void *reply); \
}; \
int evrpc_send_request_##rpcname(struct evrpc_pool *, \
struct reqstruct *, struct rplystruct *, \
void (*)(struct evrpc_status *, \
struct reqstruct *, struct rplystruct *, void *cbarg), \
void *);
/** Generates the code for receiving and sending an RPC message
*
* EVRPC_GENERATE is used to create the code corresponding to sending
* and receiving a particular RPC message
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @see EVRPC_HEADER()
*/
#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \
int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \
struct reqstruct *request, struct rplystruct *reply, \
void (*cb)(struct evrpc_status *, \
struct reqstruct *, struct rplystruct *, void *cbarg), \
void *cbarg) { \
struct evrpc_status status; \
struct evrpc_request_wrapper *ctx; \
ctx = (struct evrpc_request_wrapper *) \
malloc(sizeof(struct evrpc_request_wrapper)); \
if (ctx == NULL) \
goto error; \
ctx->pool = pool; \
ctx->evcon = NULL; \
ctx->name = strdup(#rpcname); \
if (ctx->name == NULL) { \
free(ctx); \
goto error; \
} \
ctx->cb = (void (*)(struct evrpc_status *, \
void *, void *, void *))cb; \
ctx->cb_arg = cbarg; \
ctx->request = (void *)request; \
ctx->reply = (void *)reply; \
ctx->request_marshal = (void (*)(struct evbuffer *, void *))reqstruct##_marshal; \
ctx->reply_clear = (void (*)(void *))rplystruct##_clear; \
ctx->reply_unmarshal = (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal; \
return (evrpc_make_request(ctx)); \
error: \
memset(&status, 0, sizeof(status)); \
status.error = EVRPC_STATUS_ERR_UNSTARTED; \
(*(cb))(&status, request, reply, cbarg); \
return (-1); \
}
/** Provides access to the HTTP request object underlying an RPC
*
* Access to the underlying http object; it can be used to look at headers
* or to get the remote IP address.
*
* @param rpc_req the rpc request structure provided to the server callback
* @return a struct evhttp_request object that can be inspected for
* HTTP headers or sender information.
*/
#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req
/** Creates the reply to an RPC request
*
* EVRPC_REQUEST_DONE is used to answer a request; the reply is expected
* to have been filled in. The request and reply pointers become invalid
* after this call has finished.
*
* @param rpc_req the rpc request structure provided to the server callback
*/
#define EVRPC_REQUEST_DONE(rpc_req) do { \
struct evrpc_req_generic *_req = (struct evrpc_req_generic *)(rpc_req); \
_req->done(_req); \
} while (0)
/* Takes a request object and fills it in with the right magic */
#define EVRPC_REGISTER_OBJECT(rpc, name, request, reply) \
do { \
(rpc)->uri = strdup(#name); \
if ((rpc)->uri == NULL) { \
fprintf(stderr, "failed to register object\n"); \
exit(1); \
} \
(rpc)->request_new = (void *(*)(void))request##_new; \
(rpc)->request_free = (void (*)(void *))request##_free; \
(rpc)->request_unmarshal = (int (*)(void *, struct evbuffer *))request##_unmarshal; \
(rpc)->reply_new = (void *(*)(void))reply##_new; \
(rpc)->reply_free = (void (*)(void *))reply##_free; \
(rpc)->reply_complete = (int (*)(void *))reply##_complete; \
(rpc)->reply_marshal = (void (*)(struct evbuffer*, void *))reply##_marshal; \
} while (0)
struct evrpc_base;
struct evhttp;
/* functions to start up the rpc system */
/** Creates a new rpc base from which RPC requests can be received
*
* @param server a pointer to an existing HTTP server
* @return a newly allocated evrpc_base struct
* @see evrpc_free()
*/
struct evrpc_base *evrpc_init(struct evhttp *server);
/**
* Frees the evrpc base
*
* For now, you are responsible for making sure that no rpcs are ongoing.
*
* @param base the evrpc_base object to be freed
* @see evrpc_init
*/
void evrpc_free(struct evrpc_base *base);
/** register RPCs with the HTTP Server
*
* registers a new RPC with the HTTP server; each RPC needs to have
* a unique name under which it can be identified.
*
* @param base the evrpc_base structure in which the RPC should be
* registered.
* @param name the name of the RPC
* @param request the name of the RPC request structure
* @param reply the name of the RPC reply structure
* @param callback the callback that should be invoked when the RPC
* is received. The callback has the following prototype
* void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg)
* @param cbarg an additional parameter that can be passed to the callback.
* The parameter can be used to carry around state.
*/
#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \
do { \
struct evrpc* rpc = (struct evrpc *)calloc(1, sizeof(struct evrpc)); \
EVRPC_REGISTER_OBJECT(rpc, name, request, reply); \
evrpc_register_rpc(base, rpc, \
(void (*)(struct evrpc_req_generic*, void *))callback, cbarg); \
} while (0)
int evrpc_register_rpc(struct evrpc_base *, struct evrpc *,
void (*)(struct evrpc_req_generic*, void *), void *);
/**
* Unregisters an already registered RPC
*
* @param base the evrpc_base object from which to unregister an RPC
* @param name the name of the rpc to unregister
* @return -1 on error or 0 when successful.
* @see EVRPC_REGISTER()
*/
#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc(base, #name)
int evrpc_unregister_rpc(struct evrpc_base *base, const char *name);
/*
* Client-side RPC support
*/
struct evrpc_pool;
struct evhttp_connection;
/**
* provides information about the completed RPC request.
*/
struct evrpc_status {
#define EVRPC_STATUS_ERR_NONE 0
#define EVRPC_STATUS_ERR_TIMEOUT 1
#define EVRPC_STATUS_ERR_BADPAYLOAD 2
#define EVRPC_STATUS_ERR_UNSTARTED 3
#define EVRPC_STATUS_ERR_HOOKABORTED 4
int error;
/* for looking at headers or other information */
struct evhttp_request *http_req;
};
struct evrpc_request_wrapper {
TAILQ_ENTRY(evrpc_request_wrapper) next;
/* pool on which this rpc request is being made */
struct evrpc_pool *pool;
/* connection on which the request is being sent */
struct evhttp_connection *evcon;
/* event for implementing request timeouts */
struct event ev_timeout;
/* the name of the rpc */
char *name;
/* callback */
void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
void *cb_arg;
void *request;
void *reply;
/* marshals the request into a buffer */
void (*request_marshal)(struct evbuffer *, void *);
/* removes all stored state in the reply */
void (*reply_clear)(void *);
/* marshals the reply into a buffer */
int (*reply_unmarshal)(void *, struct evbuffer*);
};
/** launches an RPC and sends it to the server
*
* EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server.
*
* @param name the name of the RPC
* @param pool the evrpc_pool that contains the connection objects over which
* the request should be sent.
* @param request a pointer to the RPC request structure - it contains the
* data to be sent to the server.
* @param reply a pointer to the RPC reply structure. It is going to be filled
* if the request was answered successfully
* @param cb the callback to invoke when the RPC request has been answered
* @param cbarg an additional argument to be passed to the callback
* @return 0 on success, -1 on failure
*/
#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg) \
evrpc_send_request_##name(pool, request, reply, cb, cbarg)
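/*
* Illustrative sketch (not part of the original sources): a minimal client
* call using EVRPC_MAKE_REQUEST() and the connection pool API declared in
* this header. SendCommand, Request and Reply are the same placeholder
* names as in the server example; Request_new()/Reply_new() are assumed to
* be generated by event_rpcgen.py, and evhttp_connection_new() is assumed
* from the evhttp API shipped with this library.
*
*    EVRPC_HEADER(SendCommand, Request, Reply);
*    EVRPC_GENERATE(SendCommand, Request, Reply);
*
*    static void
*    SendCommandDone(struct evrpc_status *status,
*        struct Request *request, struct Reply *reply, void *arg)
*    {
*            if (status->error != EVRPC_STATUS_ERR_NONE) {
*                    ... the rpc failed; the reply is not valid ...
*                    return;
*            }
*            ... use the reply ...
*    }
*
*    struct evrpc_pool *pool = evrpc_pool_new(NULL);
*    struct evhttp_connection *evcon =
*        evhttp_connection_new("127.0.0.1", 8080);
*    evrpc_pool_add_connection(pool, evcon);
*    evrpc_pool_set_timeout(pool, 30);
*
*    struct Request *request = Request_new();
*    struct Reply *reply = Reply_new();
*    EVRPC_MAKE_REQUEST(SendCommand, pool, request, reply,
*        SendCommandDone, NULL);
*/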
int evrpc_make_request(struct evrpc_request_wrapper *);
/** creates an rpc connection pool
*
* a pool has a number of connections associated with it.
* rpc requests are always made via a pool.
*
* @param base a pointer to a struct event_base object; can be left NULL
* in single-threaded applications
* @return a newly allocated struct evrpc_pool object
* @see evrpc_pool_free()
*/
struct evrpc_pool *evrpc_pool_new(struct event_base *base);
/** frees an rpc connection pool
*
* @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new()
* @see evrpc_pool_new()
*/
void evrpc_pool_free(struct evrpc_pool *pool);
/*
* adds a connection over which rpc can be dispatched. the connection
* object must have been newly created.
*/
void evrpc_pool_add_connection(struct evrpc_pool *,
struct evhttp_connection *);
/**
* Sets the timeout in seconds after which a request has to complete. The
* RPC is completely aborted if it does not complete by then. Setting
* the timeout to 0 means that a request never times out, which can be
* used to implement callback-style RPCs.
*
* Any connection already in the pool will be updated with the new
* timeout. Connections added to the pool after set_timeout has been
* called receive the pool timeout only if no timeout has been set
* for the connection itself.
*
* @param pool a pointer to a struct evrpc_pool object
* @param timeout_in_secs the number of seconds after which a request should
* timeout and a failure be returned to the callback.
*/
void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs);
/**
* Hooks for changing the input and output of RPCs; this can be used to
* implement compression, authentication, encryption, ...
*/
enum EVRPC_HOOK_TYPE {
EVRPC_INPUT, /**< apply the function to an input hook */
EVRPC_OUTPUT /**< apply the function to an output hook */
};
#ifndef WIN32
/** Deprecated alias for EVRPC_INPUT. Not available on windows, where it
* conflicts with platform headers. */
#define INPUT EVRPC_INPUT
/** Deprecated alias for EVRPC_OUTPUT. Not available on windows, where it
* conflicts with platform headers. */
#define OUTPUT EVRPC_OUTPUT
#endif
/** adds a processing hook to either an rpc base or rpc pool
*
* If a hook returns -1, the processing is aborted.
*
* The add functions return handles that can be used for removing hooks.
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param hook_type either EVRPC_INPUT or EVRPC_OUTPUT
* @param cb the callback to call when the hook is activated
* @param cb_arg an additional argument for the callback
* @return a handle to the hook so it can be removed later
* @see evrpc_remove_hook()
*/
void *evrpc_add_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg);
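/*
* Illustrative sketch (not part of the original sources): an output hook
* that adds an extra HTTP header to every outgoing rpc. The header name is
* made up; evhttp_add_header() is assumed from the evhttp API shipped with
* this library. Returning -1 instead would abort processing of the rpc.
*
*    static int
*    add_tag_hook(struct evhttp_request *req, struct evbuffer *buf, void *arg)
*    {
*            evhttp_add_header(req->output_headers, "X-Example-Tag", "1");
*            return (0);
*    }
*
*    void *handle = evrpc_add_hook(pool, EVRPC_OUTPUT, add_tag_hook, NULL);
*    ...
*    evrpc_remove_hook(pool, EVRPC_OUTPUT, handle);
*/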
/** removes a previously added hook
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param hook_type either EVRPC_INPUT or EVRPC_OUTPUT
* @param handle a handle returned by evrpc_add_hook()
* @return 1 on success or 0 on failure
* @see evrpc_add_hook()
*/
int evrpc_remove_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
void *handle);
#ifdef __cplusplus
}
#endif
#endif /* _EVRPC_H_ */

View file

@ -0,0 +1,52 @@
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVSIGNAL_H_
#define _EVSIGNAL_H_
typedef void (*ev_sighandler_t)(int);
struct evsignal_info {
struct event ev_signal;
int ev_signal_pair[2];
int ev_signal_added;
volatile sig_atomic_t evsignal_caught;
struct event_list evsigevents[NSIG];
sig_atomic_t evsigcaught[NSIG];
#ifdef HAVE_SIGACTION
struct sigaction **sh_old;
#else
ev_sighandler_t **sh_old;
#endif
int sh_old_max;
};
int evsignal_init(struct event_base *);
void evsignal_process(struct event_base *);
int evsignal_add(struct event *);
int evsignal_del(struct event *);
void evsignal_dealloc(struct event_base *);
#endif /* _EVSIGNAL_H_ */

View file

@ -0,0 +1,283 @@
/*
* Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#if defined WIN32 && !defined(HAVE_GETTIMEOFDAY_H)
#include <sys/timeb.h>
#endif
#include <stdio.h>
#include <signal.h>
#include <sys/queue.h>
#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"
int
evutil_socketpair(int family, int type, int protocol, int fd[2])
{
#ifndef WIN32
return socketpair(family, type, protocol, fd);
#else
/* This code is originally from Tor. Used with permission. */
/* This socketpair does not work when localhost is down. So
* it's really not the same thing at all. But it's close enough
* for now, and really, when localhost is down sometimes, we
* have other problems too.
*/
int listener = -1;
int connector = -1;
int acceptor = -1;
struct sockaddr_in listen_addr;
struct sockaddr_in connect_addr;
int size;
int saved_errno = -1;
if (protocol
#ifdef AF_UNIX
|| family != AF_UNIX
#endif
) {
EVUTIL_SET_SOCKET_ERROR(WSAEAFNOSUPPORT);
return -1;
}
if (!fd) {
EVUTIL_SET_SOCKET_ERROR(WSAEINVAL);
return -1;
}
listener = socket(AF_INET, type, 0);
if (listener < 0)
return -1;
memset(&listen_addr, 0, sizeof(listen_addr));
listen_addr.sin_family = AF_INET;
listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
listen_addr.sin_port = 0; /* kernel chooses port. */
if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr))
== -1)
goto tidy_up_and_fail;
if (listen(listener, 1) == -1)
goto tidy_up_and_fail;
connector = socket(AF_INET, type, 0);
if (connector < 0)
goto tidy_up_and_fail;
/* We want to find out the port number to connect to. */
size = sizeof(connect_addr);
if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
goto tidy_up_and_fail;
if (size != sizeof (connect_addr))
goto abort_tidy_up_and_fail;
if (connect(connector, (struct sockaddr *) &connect_addr,
sizeof(connect_addr)) == -1)
goto tidy_up_and_fail;
size = sizeof(listen_addr);
acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size);
if (acceptor < 0)
goto tidy_up_and_fail;
if (size != sizeof(listen_addr))
goto abort_tidy_up_and_fail;
EVUTIL_CLOSESOCKET(listener);
/* Now check that we are talking to ourselves by matching port and host on
the two sockets. */
if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
goto tidy_up_and_fail;
if (size != sizeof (connect_addr)
|| listen_addr.sin_family != connect_addr.sin_family
|| listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
|| listen_addr.sin_port != connect_addr.sin_port)
goto abort_tidy_up_and_fail;
fd[0] = connector;
fd[1] = acceptor;
return 0;
abort_tidy_up_and_fail:
saved_errno = WSAECONNABORTED;
tidy_up_and_fail:
if (saved_errno < 0)
saved_errno = WSAGetLastError();
if (listener != -1)
EVUTIL_CLOSESOCKET(listener);
if (connector != -1)
EVUTIL_CLOSESOCKET(connector);
if (acceptor != -1)
EVUTIL_CLOSESOCKET(acceptor);
EVUTIL_SET_SOCKET_ERROR(saved_errno);
return -1;
#endif
}
int
evutil_make_socket_nonblocking(int fd)
{
#ifdef WIN32
{
unsigned long nonblocking = 1;
ioctlsocket(fd, FIONBIO, (unsigned long*) &nonblocking);
}
#else
{
int flags;
if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
event_warn("fcntl(%d, F_GETFL)", fd);
return -1;
}
if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
event_warn("fcntl(%d, F_SETFL)", fd);
return -1;
}
}
#endif
return 0;
}
ev_int64_t
evutil_strtoll(const char *s, char **endptr, int base)
{
#ifdef HAVE_STRTOLL
return (ev_int64_t)strtoll(s, endptr, base);
#elif SIZEOF_LONG == 8
return (ev_int64_t)strtol(s, endptr, base);
#elif defined(WIN32) && defined(_MSC_VER) && _MSC_VER < 1300
/* XXXX on old versions of MS APIs, we only support base
* 10. */
ev_int64_t r;
if (base != 10)
return 0;
r = (ev_int64_t) _atoi64(s);
while (isspace(*s))
++s;
while (isdigit(*s))
++s;
if (endptr)
*endptr = (char*) s;
return r;
#elif defined(WIN32)
return (ev_int64_t) _strtoi64(s, endptr, base);
#else
#error "I don't know how to parse 64-bit integers."
#endif
}
#ifndef _EVENT_HAVE_GETTIMEOFDAY
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
struct _timeb tb;
if(tv == NULL)
return -1;
_ftime(&tb);
tv->tv_sec = (long) tb.time;
tv->tv_usec = ((int) tb.millitm) * 1000;
return 0;
}
#endif
int
evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
{
int r;
va_list ap;
va_start(ap, format);
r = evutil_vsnprintf(buf, buflen, format, ap);
va_end(ap);
return r;
}
int
evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
{
#ifdef _MSC_VER
int r = _vsnprintf(buf, buflen, format, ap);
buf[buflen-1] = '\0';
if (r >= 0)
return r;
else
return _vscprintf(format, ap);
#else
int r = vsnprintf(buf, buflen, format, ap);
buf[buflen-1] = '\0';
return r;
#endif
}
static int
evutil_issetugid(void)
{
#ifdef _EVENT_HAVE_ISSETUGID
return issetugid();
#else
#ifdef _EVENT_HAVE_GETEUID
if (getuid() != geteuid())
return 1;
#endif
#ifdef _EVENT_HAVE_GETEGID
if (getgid() != getegid())
return 1;
#endif
return 0;
#endif
}
const char *
evutil_getenv(const char *varname)
{
if (evutil_issetugid())
return NULL;
return getenv(varname);
}

View file

@ -0,0 +1,186 @@
/*
* Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVUTIL_H_
#define _EVUTIL_H_
/** @file evutil.h
Common convenience functions for cross-platform portability and
related socket manipulations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef _EVENT_HAVE_STDINT_H
#include <stdint.h>
#elif defined(_EVENT_HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <stdarg.h>
#ifdef _EVENT_HAVE_UINT64_T
#define ev_uint64_t uint64_t
#define ev_int64_t int64_t
#elif defined(WIN32)
#define ev_uint64_t unsigned __int64
#define ev_int64_t signed __int64
#elif _EVENT_SIZEOF_LONG_LONG == 8
#define ev_uint64_t unsigned long long
#define ev_int64_t long long
#elif _EVENT_SIZEOF_LONG == 8
#define ev_uint64_t unsigned long
#define ev_int64_t long
#else
#error "No way to define ev_uint64_t"
#endif
#ifdef _EVENT_HAVE_UINT32_T
#define ev_uint32_t uint32_t
#elif defined(WIN32)
#define ev_uint32_t unsigned int
#elif _EVENT_SIZEOF_LONG == 4
#define ev_uint32_t unsigned long
#elif _EVENT_SIZEOF_INT == 4
#define ev_uint32_t unsigned int
#else
#error "No way to define ev_uint32_t"
#endif
#ifdef _EVENT_HAVE_UINT16_T
#define ev_uint16_t uint16_t
#elif defined(WIN32)
#define ev_uint16_t unsigned short
#elif _EVENT_SIZEOF_INT == 2
#define ev_uint16_t unsigned int
#elif _EVENT_SIZEOF_SHORT == 2
#define ev_uint16_t unsigned short
#else
#error "No way to define ev_uint16_t"
#endif
#ifdef _EVENT_HAVE_UINT8_T
#define ev_uint8_t uint8_t
#else
#define ev_uint8_t unsigned char
#endif
int evutil_socketpair(int d, int type, int protocol, int sv[2]);
int evutil_make_socket_nonblocking(int sock);
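/*
* Illustrative sketch (not part of the original sources): the usual pairing
* of these two helpers, e.g. to set up an internal notification channel.
*
*    int fds[2];
*    if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == -1)
*            return (-1);
*    evutil_make_socket_nonblocking(fds[0]);
*    evutil_make_socket_nonblocking(fds[1]);
*/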
#ifdef WIN32
#define EVUTIL_CLOSESOCKET(s) closesocket(s)
#else
#define EVUTIL_CLOSESOCKET(s) close(s)
#endif
#ifdef WIN32
#define EVUTIL_SOCKET_ERROR() WSAGetLastError()
#define EVUTIL_SET_SOCKET_ERROR(errcode) \
do { WSASetLastError(errcode); } while (0)
#else
#define EVUTIL_SOCKET_ERROR() (errno)
#define EVUTIL_SET_SOCKET_ERROR(errcode) \
do { errno = (errcode); } while (0)
#endif
/*
* Manipulation functions for struct timeval
*/
#ifdef _EVENT_HAVE_TIMERADD
#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp))
#else
#define evutil_timeradd(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
if ((vvp)->tv_usec >= 1000000) { \
(vvp)->tv_sec++; \
(vvp)->tv_usec -= 1000000; \
} \
} while (0)
#define evutil_timersub(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
if ((vvp)->tv_usec < 0) { \
(vvp)->tv_sec--; \
(vvp)->tv_usec += 1000000; \
} \
} while (0)
#endif /* !_EVENT_HAVE_TIMERADD */
#ifdef _EVENT_HAVE_TIMERCLEAR
#define evutil_timerclear(tvp) timerclear(tvp)
#else
#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
#endif
#define evutil_timercmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? \
((tvp)->tv_usec cmp (uvp)->tv_usec) : \
((tvp)->tv_sec cmp (uvp)->tv_sec))
#ifdef _EVENT_HAVE_TIMERISSET
#define evutil_timerisset(tvp) timerisset(tvp)
#else
#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#endif
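/*
* Illustrative sketch (not part of the original sources): computing a
* deadline five seconds from now and later testing whether it has passed,
* using the portable timeval helpers above.
*
*    struct timeval now, delay, deadline;
*    evutil_gettimeofday(&now, NULL);
*    evutil_timerclear(&delay);
*    delay.tv_sec = 5;
*    evutil_timeradd(&now, &delay, &deadline);
*    ...
*    evutil_gettimeofday(&now, NULL);
*    if (!evutil_timercmp(&now, &deadline, <))
*            ... the deadline has been reached ...
*/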
/* big-int related functions */
ev_int64_t evutil_strtoll(const char *s, char **endptr, int base);
#ifdef _EVENT_HAVE_GETTIMEOFDAY
#define evutil_gettimeofday(tv, tz) gettimeofday((tv), (tz))
#else
struct timezone;
int evutil_gettimeofday(struct timeval *tv, struct timezone *tz);
#endif
int evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 3, 4)))
#endif
;
int evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap);
#ifdef __cplusplus
}
#endif
#endif /* _EVUTIL_H_ */

View file

@ -0,0 +1,153 @@
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* This header file contains definitions for dealing with HTTP requests
* that are internal to libevent. As a user of the library, you should not
* need to know about these.
*/
#ifndef _HTTP_H_
#define _HTTP_H_
#define HTTP_CONNECT_TIMEOUT 45
#define HTTP_WRITE_TIMEOUT 50
#define HTTP_READ_TIMEOUT 50
#define HTTP_PREFIX "http://"
#define HTTP_DEFAULTPORT 80
enum message_read_status {
ALL_DATA_READ = 1,
MORE_DATA_EXPECTED = 0,
DATA_CORRUPTED = -1,
REQUEST_CANCELED = -2
};
enum evhttp_connection_error {
EVCON_HTTP_TIMEOUT,
EVCON_HTTP_EOF,
EVCON_HTTP_INVALID_HEADER
};
struct evbuffer;
struct evhttp_request;
/* A stupid connection object - maybe make this a bufferevent later */
enum evhttp_connection_state {
EVCON_DISCONNECTED, /**< not currently connected and not trying to connect */
EVCON_CONNECTING, /**< currently trying to connect */
EVCON_IDLE, /**< connection is established */
EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or
**< Status-Line (outgoing conn) */
EVCON_READING_HEADERS, /**< reading request/response headers */
EVCON_READING_BODY, /**< reading request/response body */
EVCON_READING_TRAILER, /**< reading request/response chunked trailer */
EVCON_WRITING /**< writing request/response headers/body */
};
struct event_base;
struct evhttp_connection {
/* we use tailq only if they were created for an http server */
TAILQ_ENTRY(evhttp_connection) (next);
int fd;
struct event ev;
struct event close_ev;
struct evbuffer *input_buffer;
struct evbuffer *output_buffer;
char *bind_address; /* address to use for binding the src */
u_short bind_port; /* local port for binding the src */
char *address; /* address to connect to */
u_short port;
int flags;
#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */
#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */
#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */
int timeout; /* timeout in seconds for events */
int retry_cnt; /* retry count */
int retry_max; /* maximum number of retries */
enum evhttp_connection_state state;
/* for server connections, the http server they are connected with */
struct evhttp *http_server;
TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
void (*cb)(struct evhttp_connection *, void *);
void *cb_arg;
void (*closecb)(struct evhttp_connection *, void *);
void *closecb_arg;
struct event_base *base;
};
struct evhttp_cb {
TAILQ_ENTRY(evhttp_cb) next;
char *what;
void (*cb)(struct evhttp_request *req, void *);
void *cbarg;
};
/* both the http server and the rpc system need to queue connections */
TAILQ_HEAD(evconq, evhttp_connection);
/* each bound socket is stored in one of these */
struct evhttp_bound_socket {
TAILQ_ENTRY(evhttp_bound_socket) (next);
struct event bind_ev;
};
struct evhttp {
TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
struct evconq connections;
int timeout;
void (*gencb)(struct evhttp_request *req, void *);
void *gencbarg;
struct event_base *base;
};
/* resets the connection; can be reused for more requests */
void evhttp_connection_reset(struct evhttp_connection *);
/* connects if necessary */
int evhttp_connection_connect(struct evhttp_connection *);
/* notifies the current request that it failed; resets connection */
void evhttp_connection_fail(struct evhttp_connection *,
enum evhttp_connection_error error);
void evhttp_get_request(struct evhttp *, int, struct sockaddr *, socklen_t);
int evhttp_hostportfile(char *, char **, u_short *, char **);
int evhttp_parse_firstline(struct evhttp_request *, struct evbuffer*);
int evhttp_parse_headers(struct evhttp_request *, struct evbuffer*);
void evhttp_start_read(struct evhttp_connection *);
void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
void evhttp_write_buffer(struct evhttp_connection *,
void (*)(struct evhttp_connection *, void *), void *);
/* response helpers for sending the HTML data in the buffer */
void evhttp_response_code(struct evhttp_request *, int, const char *);
void evhttp_send_page(struct evhttp_request *, struct evbuffer *);
#endif /* _HTTP_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,432 @@
/* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#define _GNU_SOURCE 1
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
/* Some platforms apparently define the udata field of struct kevent as
* intptr_t, whereas others define it as void*. There doesn't seem to be an
* easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
#define PTR_TO_UDATA(x) ((intptr_t)(x))
#else
#define PTR_TO_UDATA(x) (x)
#endif
#include "event.h"
#include "event-internal.h"
#include "log.h"
#include "evsignal.h"
#define EVLIST_X_KQINKERNEL 0x1000
#define NEVENT 64
struct kqop {
struct kevent *changes;
int nchanges;
struct kevent *events;
struct event_list evsigevents[NSIG];
int nevents;
int kq;
pid_t pid;
};
static void *kq_init (struct event_base *);
static int kq_add (void *, struct event *);
static int kq_del (void *, struct event *);
static int kq_dispatch (struct event_base *, void *, struct timeval *);
static int kq_insert (struct kqop *, struct kevent *);
static void kq_dealloc (struct event_base *, void *);
const struct eventop kqops = {
"kqueue",
kq_init,
kq_add,
kq_del,
kq_dispatch,
kq_dealloc,
1 /* need reinit */
};
static void *
kq_init(struct event_base *base)
{
int i, kq;
struct kqop *kqueueop;
/* Disable kqueue when this environment variable is set */
if (evutil_getenv("EVENT_NOKQUEUE"))
return (NULL);
if (!(kqueueop = calloc(1, sizeof(struct kqop))))
return (NULL);
/* Initialize the kernel queue */
if ((kq = kqueue()) == -1) {
event_warn("kqueue");
free (kqueueop);
return (NULL);
}
kqueueop->kq = kq;
kqueueop->pid = getpid();
/* Initialize fields */
kqueueop->changes = malloc(NEVENT * sizeof(struct kevent));
if (kqueueop->changes == NULL) {
free (kqueueop);
return (NULL);
}
kqueueop->events = malloc(NEVENT * sizeof(struct kevent));
if (kqueueop->events == NULL) {
free (kqueueop->changes);
free (kqueueop);
return (NULL);
}
kqueueop->nevents = NEVENT;
/* we need to keep track of multiple events per signal */
for (i = 0; i < NSIG; ++i) {
TAILQ_INIT(&kqueueop->evsigevents[i]);
}
return (kqueueop);
}
static int
kq_insert(struct kqop *kqop, struct kevent *kev)
{
int nevents = kqop->nevents;
if (kqop->nchanges == nevents) {
struct kevent *newchange;
struct kevent *newresult;
nevents *= 2;
newchange = realloc(kqop->changes,
nevents * sizeof(struct kevent));
if (newchange == NULL) {
event_warn("%s: malloc", __func__);
return (-1);
}
kqop->changes = newchange;
newresult = realloc(kqop->events,
nevents * sizeof(struct kevent));
/*
* If we fail, we don't have to worry about freeing,
* the next realloc will pick it up.
*/
if (newresult == NULL) {
event_warn("%s: malloc", __func__);
return (-1);
}
kqop->events = newresult;
kqop->nevents = nevents;
}
memcpy(&kqop->changes[kqop->nchanges++], kev, sizeof(struct kevent));
event_debug(("%s: fd %d %s%s",
__func__, (int)kev->ident,
kev->filter == EVFILT_READ ? "EVFILT_READ" : "EVFILT_WRITE",
kev->flags == EV_DELETE ? " (del)" : ""));
return (0);
}
static void
kq_sighandler(int sig)
{
/* Do nothing here */
}
static int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
struct kqop *kqop = arg;
struct kevent *changes = kqop->changes;
struct kevent *events = kqop->events;
struct event *ev;
struct timespec ts, *ts_p = NULL;
int i, res;
if (tv != NULL) {
TIMEVAL_TO_TIMESPEC(tv, &ts);
ts_p = &ts;
}
res = kevent(kqop->kq, changes, kqop->nchanges,
events, kqop->nevents, ts_p);
kqop->nchanges = 0;
if (res == -1) {
if (errno != EINTR) {
event_warn("kevent");
return (-1);
}
return (0);
}
event_debug(("%s: kevent reports %d", __func__, res));
for (i = 0; i < res; i++) {
int which = 0;
if (events[i].flags & EV_ERROR) {
/*
* Errors that can happen when a delete fails:
* EBADF happens when the file descriptor has been
* closed,
* ENOENT when the file descriptor was closed and
* then reopened.
* EINVAL for reasons not understood; EINVAL
* should never be returned, but FreeBSD does :-\
* An error is also indicated when a callback deletes
* an event we are still processing. In that case
* the data field is set to ENOENT.
*/
if (events[i].data == EBADF ||
events[i].data == EINVAL ||
events[i].data == ENOENT)
continue;
errno = events[i].data;
return (-1);
}
if (events[i].filter == EVFILT_READ) {
which |= EV_READ;
} else if (events[i].filter == EVFILT_WRITE) {
which |= EV_WRITE;
} else if (events[i].filter == EVFILT_SIGNAL) {
which |= EV_SIGNAL;
}
if (!which)
continue;
if (events[i].filter == EVFILT_SIGNAL) {
struct event_list *head =
(struct event_list *)events[i].udata;
TAILQ_FOREACH(ev, head, ev_signal_next) {
event_active(ev, which, events[i].data);
}
} else {
ev = (struct event *)events[i].udata;
if (!(ev->ev_events & EV_PERSIST))
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
event_active(ev, which, 1);
}
}
return (0);
}
static int
kq_add(void *arg, struct event *ev)
{
struct kqop *kqop = arg;
struct kevent kev;
if (ev->ev_events & EV_SIGNAL) {
int nsignal = EVENT_SIGNAL(ev);
assert(nsignal >= 0 && nsignal < NSIG);
if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
struct timespec timeout = { 0, 0 };
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_ADD;
kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]);
/* Be ready for the signal if it is sent any
* time between now and the next call to
* kq_dispatch. */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
if (_evsignal_set_handler(ev->ev_base, nsignal,
kq_sighandler) == -1)
return (-1);
}
TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev,
ev_signal_next);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
return (0);
}
if (ev->ev_events & EV_READ) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_READ;
#ifdef NOTE_EOF
/* Make it behave like select() and poll() */
kev.fflags = NOTE_EOF;
#endif
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
kev.udata = PTR_TO_UDATA(ev);
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
}
if (ev->ev_events & EV_WRITE) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_WRITE;
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
kev.udata = PTR_TO_UDATA(ev);
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
}
return (0);
}
static int
kq_del(void *arg, struct event *ev)
{
struct kqop *kqop = arg;
struct kevent kev;
if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
return (0);
if (ev->ev_events & EV_SIGNAL) {
int nsignal = EVENT_SIGNAL(ev);
struct timespec timeout = { 0, 0 };
assert(nsignal >= 0 && nsignal < NSIG);
TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next);
if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_DELETE;
/* Because we insert signal events
* immediately, we need to delete them
* immediately, too */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
if (_evsignal_restore_handler(ev->ev_base,
nsignal) == -1)
return (-1);
}
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
return (0);
}
if (ev->ev_events & EV_READ) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_READ;
kev.flags = EV_DELETE;
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
}
if (ev->ev_events & EV_WRITE) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.filter = EVFILT_WRITE;
kev.flags = EV_DELETE;
if (kq_insert(kqop, &kev) == -1)
return (-1);
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
}
return (0);
}
static void
kq_dealloc(struct event_base *base, void *arg)
{
struct kqop *kqop = arg;
evsignal_dealloc(base);
if (kqop->changes)
free(kqop->changes);
if (kqop->events)
free(kqop->events);
if (kqop->kq >= 0 && kqop->pid == getpid())
close(kqop->kq);
memset(kqop, 0, sizeof(struct kqop));
free(kqop);
}

View file

@ -0,0 +1,185 @@
/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* log.c
*
* Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
*
* Copyright (c) 2005 Nick Mathewson <nickm@freehaven.net>
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* Copyright (c) 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "config.h"
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include "event.h"
#include "log.h"
#include "evutil.h"
static void _warn_helper(int severity, int log_errno, const char *fmt,
va_list ap);
static void event_log(int severity, const char *msg);
void
event_err(int eval, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, errno, fmt, ap);
va_end(ap);
exit(eval);
}
void
event_warn(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, errno, fmt, ap);
va_end(ap);
}
void
event_errx(int eval, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, -1, fmt, ap);
va_end(ap);
exit(eval);
}
void
event_warnx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, -1, fmt, ap);
va_end(ap);
}
void
event_msgx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_MSG, -1, fmt, ap);
va_end(ap);
}
void
_event_debugx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_DEBUG, -1, fmt, ap);
va_end(ap);
}
static void
_warn_helper(int severity, int log_errno, const char *fmt, va_list ap)
{
char buf[1024];
size_t len;
if (fmt != NULL)
evutil_vsnprintf(buf, sizeof(buf), fmt, ap);
else
buf[0] = '\0';
if (log_errno >= 0) {
len = strlen(buf);
if (len < sizeof(buf) - 3) {
evutil_snprintf(buf + len, sizeof(buf) - len, ": %s",
strerror(log_errno));
}
}
event_log(severity, buf);
}
static event_log_cb log_fn = NULL;
void
event_set_log_callback(event_log_cb cb)
{
log_fn = cb;
}
static void
event_log(int severity, const char *msg)
{
if (log_fn)
log_fn(severity, msg);
else {
const char *severity_str;
switch (severity) {
case _EVENT_LOG_DEBUG:
severity_str = "debug";
break;
case _EVENT_LOG_MSG:
severity_str = "msg";
break;
case _EVENT_LOG_WARN:
severity_str = "warn";
break;
case _EVENT_LOG_ERR:
severity_str = "err";
break;
default:
severity_str = "???";
break;
}
(void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
}
}

View file

@ -0,0 +1,51 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LOG_H_
#define _LOG_H_
#ifdef __GNUC__
#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
#else
#define EV_CHECK_FMT(a,b)
#endif
void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
#ifdef USE_DEBUG
#define event_debug(x) _event_debugx x
#else
#define event_debug(x) do {;} while (0)
#endif
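/*
* Note the double parentheses when calling event_debug(); the inner pair is
* required so that the whole argument list expands into a single
* _event_debugx() call, e.g. (illustrative):
*
*    event_debug(("%s: fd %d ready", __func__, fd));
*/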
#undef EV_CHECK_FMT
#endif

View file

@ -0,0 +1,149 @@
/*
* Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MIN_HEAP_H_
#define _MIN_HEAP_H_
#include "event.h"
#include "evutil.h"
typedef struct min_heap
{
struct event** p;
unsigned n, a;
} min_heap_t;
static inline void min_heap_ctor(min_heap_t* s);
static inline void min_heap_dtor(min_heap_t* s);
static inline void min_heap_elem_init(struct event* e);
static inline int min_heap_elem_greater(struct event *a, struct event *b);
static inline int min_heap_empty(min_heap_t* s);
static inline unsigned min_heap_size(min_heap_t* s);
static inline struct event* min_heap_top(min_heap_t* s);
static inline int min_heap_reserve(min_heap_t* s, unsigned n);
static inline int min_heap_push(min_heap_t* s, struct event* e);
static inline struct event* min_heap_pop(min_heap_t* s);
static inline int min_heap_erase(min_heap_t* s, struct event* e);
static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
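/*
* Illustrative sketch (not part of the original sources): the heap keeps the
* event with the smallest ev_timeout at the top, so the typical usage is
*
*    min_heap_t heap;
*    min_heap_ctor(&heap);
*    min_heap_elem_init(ev);                    (once per struct event)
*    min_heap_push(&heap, ev);
*    struct event *next = min_heap_top(&heap);  (earliest timeout or NULL)
*    min_heap_pop(&heap);
*    min_heap_dtor(&heap);
*/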
int min_heap_elem_greater(struct event *a, struct event *b)
{
return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
}
void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
void min_heap_dtor(min_heap_t* s) { if(s->p) free(s->p); }
void min_heap_elem_init(struct event* e) { e->min_heap_idx = -1; }
int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
unsigned min_heap_size(min_heap_t* s) { return s->n; }
struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }
int min_heap_push(min_heap_t* s, struct event* e)
{
if(min_heap_reserve(s, s->n + 1))
return -1;
min_heap_shift_up_(s, s->n++, e);
return 0;
}
struct event* min_heap_pop(min_heap_t* s)
{
if(s->n)
{
struct event* e = *s->p;
min_heap_shift_down_(s, 0u, s->p[--s->n]);
e->min_heap_idx = -1;
return e;
}
return 0;
}
int min_heap_erase(min_heap_t* s, struct event* e)
{
if(((unsigned int)-1) != e->min_heap_idx)
{
struct event *last = s->p[--s->n];
unsigned parent = (e->min_heap_idx - 1) / 2;
/* we replace e with the last element in the heap. We might need to
shift it upward if it is less than its parent, or downward if it is
greater than one or both its children. Since the children are known
to be less than the parent, it can't need to shift both up and
down. */
if (e->min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
min_heap_shift_up_(s, e->min_heap_idx, last);
else
min_heap_shift_down_(s, e->min_heap_idx, last);
e->min_heap_idx = -1;
return 0;
}
return -1;
}
int min_heap_reserve(min_heap_t* s, unsigned n)
{
if(s->a < n)
{
struct event** p;
unsigned a = s->a ? s->a * 2 : 8;
if(a < n)
a = n;
if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
return -1;
s->p = p;
s->a = a;
}
return 0;
}
void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
{
unsigned parent = (hole_index - 1) / 2;
while(hole_index && min_heap_elem_greater(s->p[parent], e))
{
(s->p[hole_index] = s->p[parent])->min_heap_idx = hole_index;
hole_index = parent;
parent = (hole_index - 1) / 2;
}
(s->p[hole_index] = e)->min_heap_idx = hole_index;
}
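/* min_child starts at the right child, 2 * (hole_index + 1); the subtraction
 * steps back to the left child when the right child is past the end of the
 * heap or compares greater. The smaller child is moved up into the hole until
 * e is no greater than both children; the final shift_up places e and lets it
 * rise when erase handed us an element smaller than the hole's parent. */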
void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
{
unsigned min_child = 2 * (hole_index + 1);
while(min_child <= s->n)
{
min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
if(!(min_heap_elem_greater(e, s->p[min_child])))
break;
(s->p[hole_index] = s->p[min_child])->min_heap_idx = hole_index;
hole_index = min_child;
min_child = 2 * (hole_index + 1);
}
min_heap_shift_up_(s, hole_index, e);
}
#endif /* _MIN_HEAP_H_ */

View file

@ -0,0 +1,378 @@
/* $OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
struct pollop {
int event_count; /* Number of pollfd slots allocated */
int nfds; /* Number of pollfd slots in use */
int fd_count; /* Size of idxplus1_by_fd */
struct pollfd *event_set;
struct event **event_r_back;
struct event **event_w_back;
int *idxplus1_by_fd; /* Index into event_set by fd; we add 1 so
* that 0 (which is easy to memset) can mean
* "no entry." */
};
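/* event_set is the array handed to poll(); event_r_back[i] and
 * event_w_back[i] point back to the events waiting to read or write on
 * event_set[i].fd, and all three arrays grow together in poll_add(). */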
static void *poll_init (struct event_base *);
static int poll_add (void *, struct event *);
static int poll_del (void *, struct event *);
static int poll_dispatch (struct event_base *, void *, struct timeval *);
static void poll_dealloc (struct event_base *, void *);
const struct eventop pollops = {
"poll",
poll_init,
poll_add,
poll_del,
poll_dispatch,
poll_dealloc,
0
};
static void *
poll_init(struct event_base *base)
{
struct pollop *pollop;
/* Disable poll when this environment variable is set */
if (evutil_getenv("EVENT_NOPOLL"))
return (NULL);
if (!(pollop = calloc(1, sizeof(struct pollop))))
return (NULL);
evsignal_init(base);
return (pollop);
}
#ifdef CHECK_INVARIANTS
static void
poll_check_ok(struct pollop *pop)
{
int i, idx;
struct event *ev;
for (i = 0; i < pop->fd_count; ++i) {
idx = pop->idxplus1_by_fd[i]-1;
if (idx < 0)
continue;
assert(pop->event_set[idx].fd == i);
if (pop->event_set[idx].events & POLLIN) {
ev = pop->event_r_back[idx];
assert(ev);
assert(ev->ev_events & EV_READ);
assert(ev->ev_fd == i);
}
if (pop->event_set[idx].events & POLLOUT) {
ev = pop->event_w_back[idx];
assert(ev);
assert(ev->ev_events & EV_WRITE);
assert(ev->ev_fd == i);
}
}
for (i = 0; i < pop->nfds; ++i) {
struct pollfd *pfd = &pop->event_set[i];
assert(pop->idxplus1_by_fd[pfd->fd] == i+1);
}
}
#else
#define poll_check_ok(pop)
#endif
static int
poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
int res, i, j, msec = -1, nfds;
struct pollop *pop = arg;
poll_check_ok(pop);
if (tv != NULL)
msec = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
nfds = pop->nfds;
res = poll(pop->event_set, nfds, msec);
if (res == -1) {
if (errno != EINTR) {
event_warn("poll");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: poll reports %d", __func__, res));
if (res == 0 || nfds == 0)
return (0);
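/* Start the scan at a random offset so that ready events on low-numbered
 * descriptors are not consistently serviced first. */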
i = random() % nfds;
for (j = 0; j < nfds; j++) {
struct event *r_ev = NULL, *w_ev = NULL;
int what;
if (++i == nfds)
i = 0;
what = pop->event_set[i].revents;
if (!what)
continue;
res = 0;
/* If the descriptor was closed or errored, notify both readers and writers */
if (what & (POLLHUP|POLLERR))
what |= POLLIN|POLLOUT;
if (what & POLLIN) {
res |= EV_READ;
r_ev = pop->event_r_back[i];
}
if (what & POLLOUT) {
res |= EV_WRITE;
w_ev = pop->event_w_back[i];
}
if (res == 0)
continue;
if (r_ev && (res & r_ev->ev_events)) {
event_active(r_ev, res & r_ev->ev_events, 1);
}
if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
event_active(w_ev, res & w_ev->ev_events, 1);
}
}
return (0);
}
static int
poll_add(void *arg, struct event *ev)
{
struct pollop *pop = arg;
struct pollfd *pfd = NULL;
int i;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
return (0);
poll_check_ok(pop);
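/* Grow event_set and both back-pointer arrays together when the table is
 * full: at least 32 entries, doubling after that. */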
if (pop->nfds + 1 >= pop->event_count) {
struct pollfd *tmp_event_set;
struct event **tmp_event_r_back;
struct event **tmp_event_w_back;
int tmp_event_count;
if (pop->event_count < 32)
tmp_event_count = 32;
else
tmp_event_count = pop->event_count * 2;
/* We need more file descriptors */
tmp_event_set = realloc(pop->event_set,
tmp_event_count * sizeof(struct pollfd));
if (tmp_event_set == NULL) {
event_warn("realloc");
return (-1);
}
pop->event_set = tmp_event_set;
tmp_event_r_back = realloc(pop->event_r_back,
tmp_event_count * sizeof(struct event *));
if (tmp_event_r_back == NULL) {
/* event_set overallocated; that's okay. */
event_warn("realloc");
return (-1);
}
pop->event_r_back = tmp_event_r_back;
tmp_event_w_back = realloc(pop->event_w_back,
tmp_event_count * sizeof(struct event *));
if (tmp_event_w_back == NULL) {
/* event_set and event_r_back overallocated; that's
* okay. */
event_warn("realloc");
return (-1);
}
pop->event_w_back = tmp_event_w_back;
pop->event_count = tmp_event_count;
}
if (ev->ev_fd >= pop->fd_count) {
int *tmp_idxplus1_by_fd;
int new_count;
if (pop->fd_count < 32)
new_count = 32;
else
new_count = pop->fd_count * 2;
while (new_count <= ev->ev_fd)
new_count *= 2;
tmp_idxplus1_by_fd =
realloc(pop->idxplus1_by_fd, new_count * sizeof(int));
if (tmp_idxplus1_by_fd == NULL) {
event_warn("realloc");
return (-1);
}
pop->idxplus1_by_fd = tmp_idxplus1_by_fd;
memset(pop->idxplus1_by_fd + pop->fd_count,
0, sizeof(int)*(new_count - pop->fd_count));
pop->fd_count = new_count;
}
i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
if (i >= 0) {
pfd = &pop->event_set[i];
} else {
i = pop->nfds++;
pfd = &pop->event_set[i];
pfd->events = 0;
pfd->fd = ev->ev_fd;
pop->event_w_back[i] = pop->event_r_back[i] = NULL;
pop->idxplus1_by_fd[ev->ev_fd] = i + 1;
}
pfd->revents = 0;
if (ev->ev_events & EV_WRITE) {
pfd->events |= POLLOUT;
pop->event_w_back[i] = ev;
}
if (ev->ev_events & EV_READ) {
pfd->events |= POLLIN;
pop->event_r_back[i] = ev;
}
poll_check_ok(pop);
return (0);
}
/*
 * Remove the event's interest from the pollfd table. If no other event
 * still cares about the descriptor, the last pollfd is shifted down into
 * the freed slot to keep the table dense.
 */
static int
poll_del(void *arg, struct event *ev)
{
struct pollop *pop = arg;
struct pollfd *pfd = NULL;
int i;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
return (0);
poll_check_ok(pop);
i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
if (i < 0)
return (-1);
/* Do we still want to read or write? */
pfd = &pop->event_set[i];
if (ev->ev_events & EV_READ) {
pfd->events &= ~POLLIN;
pop->event_r_back[i] = NULL;
}
if (ev->ev_events & EV_WRITE) {
pfd->events &= ~POLLOUT;
pop->event_w_back[i] = NULL;
}
poll_check_ok(pop);
if (pfd->events)
/* Another event cares about that fd. */
return (0);
/* Okay, so we aren't interested in that fd anymore. */
pop->idxplus1_by_fd[ev->ev_fd] = 0;
--pop->nfds;
if (i != pop->nfds) {
/*
* Shift the last pollfd down into the now-unoccupied
* position.
*/
memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
sizeof(struct pollfd));
pop->event_r_back[i] = pop->event_r_back[pop->nfds];
pop->event_w_back[i] = pop->event_w_back[pop->nfds];
pop->idxplus1_by_fd[pop->event_set[i].fd] = i + 1;
}
poll_check_ok(pop);
return (0);
}
static void
poll_dealloc(struct event_base *base, void *arg)
{
struct pollop *pop = arg;
evsignal_dealloc(base);
if (pop->event_set)
free(pop->event_set);
if (pop->event_r_back)
free(pop->event_r_back);
if (pop->event_w_back)
free(pop->event_w_back);
if (pop->idxplus1_by_fd)
free(pop->idxplus1_by_fd);
memset(pop, 0, sizeof(struct pollop));
free(pop);
}

View file

@ -0,0 +1,363 @@
/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#include <sys/queue.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif
#include "event.h"
#include "evutil.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
#ifndef _EVENT_HAVE_FD_MASK
/* This type is mandatory, but Android doesn't define it. */
#undef NFDBITS
#define NFDBITS (sizeof(long)*8)
typedef unsigned long fd_mask;
#endif
struct selectop {
int event_fds; /* Highest fd in fd set */
int event_fdsz;
fd_set *event_readset_in;
fd_set *event_writeset_in;
fd_set *event_readset_out;
fd_set *event_writeset_out;
struct event **event_r_by_fd;
struct event **event_w_by_fd;
};
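/* select() overwrites the fd_sets passed to it, so the *_in sets hold the
 * registered interest and are copied into the *_out scratch sets before each
 * call; event_r_by_fd/event_w_by_fd map a descriptor back to the event that
 * wants to read or write it. */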
static void *select_init (struct event_base *);
static int select_add (void *, struct event *);
static int select_del (void *, struct event *);
static int select_dispatch (struct event_base *, void *, struct timeval *);
static void select_dealloc (struct event_base *, void *);
const struct eventop selectops = {
"select",
select_init,
select_add,
select_del,
select_dispatch,
select_dealloc,
0
};
static int select_resize(struct selectop *sop, int fdsz);
static void *
select_init(struct event_base *base)
{
struct selectop *sop;
/* Disable select when this environment variable is set */
if (evutil_getenv("EVENT_NOSELECT"))
return (NULL);
if (!(sop = calloc(1, sizeof(struct selectop))))
return (NULL);
select_resize(sop, howmany(32 + 1, NFDBITS)*sizeof(fd_mask));
evsignal_init(base);
return (sop);
}
#ifdef CHECK_INVARIANTS
static void
check_selectop(struct selectop *sop)
{
int i;
for (i = 0; i <= sop->event_fds; ++i) {
if (FD_ISSET(i, sop->event_readset_in)) {
assert(sop->event_r_by_fd[i]);
assert(sop->event_r_by_fd[i]->ev_events & EV_READ);
assert(sop->event_r_by_fd[i]->ev_fd == i);
} else {
assert(! sop->event_r_by_fd[i]);
}
if (FD_ISSET(i, sop->event_writeset_in)) {
assert(sop->event_w_by_fd[i]);
assert(sop->event_w_by_fd[i]->ev_events & EV_WRITE);
assert(sop->event_w_by_fd[i]->ev_fd == i);
} else {
assert(! sop->event_w_by_fd[i]);
}
}
}
#else
#define check_selectop(sop) do { (void) sop; } while (0)
#endif
static int
select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
int res, i, j;
struct selectop *sop = arg;
check_selectop(sop);
memcpy(sop->event_readset_out, sop->event_readset_in,
sop->event_fdsz);
memcpy(sop->event_writeset_out, sop->event_writeset_in,
sop->event_fdsz);
res = select(sop->event_fds + 1, sop->event_readset_out,
sop->event_writeset_out, NULL, tv);
check_selectop(sop);
if (res == -1) {
if (errno != EINTR) {
event_warn("select");
return (-1);
}
evsignal_process(base);
return (0);
} else if (base->sig.evsignal_caught) {
evsignal_process(base);
}
event_debug(("%s: select reports %d", __func__, res));
check_selectop(sop);
i = random() % (sop->event_fds+1);
for (j = 0; j <= sop->event_fds; ++j) {
struct event *r_ev = NULL, *w_ev = NULL;
if (++i >= sop->event_fds+1)
i = 0;
res = 0;
if (FD_ISSET(i, sop->event_readset_out)) {
r_ev = sop->event_r_by_fd[i];
res |= EV_READ;
}
if (FD_ISSET(i, sop->event_writeset_out)) {
w_ev = sop->event_w_by_fd[i];
res |= EV_WRITE;
}
if (r_ev && (res & r_ev->ev_events)) {
event_active(r_ev, res & r_ev->ev_events, 1);
}
if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
event_active(w_ev, res & w_ev->ev_events, 1);
}
}
check_selectop(sop);
return (0);
}
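/* Grow every fd_set and back-pointer array to fdsz bytes (always a multiple
 * of sizeof(fd_mask)). The new tails of the master _in sets and of the
 * back-pointer arrays are zeroed; the _out scratch sets are rewritten on
 * every dispatch, so their tails need no initialization. */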
static int
select_resize(struct selectop *sop, int fdsz)
{
int n_events, n_events_old;
fd_set *readset_in = NULL;
fd_set *writeset_in = NULL;
fd_set *readset_out = NULL;
fd_set *writeset_out = NULL;
struct event **r_by_fd = NULL;
struct event **w_by_fd = NULL;
n_events = (fdsz/sizeof(fd_mask)) * NFDBITS;
n_events_old = (sop->event_fdsz/sizeof(fd_mask)) * NFDBITS;
if (sop->event_readset_in)
check_selectop(sop);
if ((readset_in = realloc(sop->event_readset_in, fdsz)) == NULL)
goto error;
sop->event_readset_in = readset_in;
if ((readset_out = realloc(sop->event_readset_out, fdsz)) == NULL)
goto error;
sop->event_readset_out = readset_out;
if ((writeset_in = realloc(sop->event_writeset_in, fdsz)) == NULL)
goto error;
sop->event_writeset_in = writeset_in;
if ((writeset_out = realloc(sop->event_writeset_out, fdsz)) == NULL)
goto error;
sop->event_writeset_out = writeset_out;
if ((r_by_fd = realloc(sop->event_r_by_fd,
n_events*sizeof(struct event*))) == NULL)
goto error;
sop->event_r_by_fd = r_by_fd;
if ((w_by_fd = realloc(sop->event_w_by_fd,
n_events * sizeof(struct event*))) == NULL)
goto error;
sop->event_w_by_fd = w_by_fd;
memset((char *)sop->event_readset_in + sop->event_fdsz, 0,
fdsz - sop->event_fdsz);
memset((char *)sop->event_writeset_in + sop->event_fdsz, 0,
fdsz - sop->event_fdsz);
memset(sop->event_r_by_fd + n_events_old, 0,
(n_events-n_events_old) * sizeof(struct event*));
memset(sop->event_w_by_fd + n_events_old, 0,
(n_events-n_events_old) * sizeof(struct event*));
sop->event_fdsz = fdsz;
check_selectop(sop);
return (0);
error:
event_warn("malloc");
return (-1);
}
static int
select_add(void *arg, struct event *ev)
{
struct selectop *sop = arg;
if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
check_selectop(sop);
/*
* Keep track of the highest fd, so that we can calculate the size
* of the fd_sets for select(2)
*/
if (sop->event_fds < ev->ev_fd) {
int fdsz = sop->event_fdsz;
if (fdsz < sizeof(fd_mask))
fdsz = sizeof(fd_mask);
while (fdsz <
(howmany(ev->ev_fd + 1, NFDBITS) * sizeof(fd_mask)))
fdsz *= 2;
if (fdsz != sop->event_fdsz) {
if (select_resize(sop, fdsz)) {
check_selectop(sop);
return (-1);
}
}
sop->event_fds = ev->ev_fd;
}
if (ev->ev_events & EV_READ) {
FD_SET(ev->ev_fd, sop->event_readset_in);
sop->event_r_by_fd[ev->ev_fd] = ev;
}
if (ev->ev_events & EV_WRITE) {
FD_SET(ev->ev_fd, sop->event_writeset_in);
sop->event_w_by_fd[ev->ev_fd] = ev;
}
check_selectop(sop);
return (0);
}
/*
 * Clear the event's bits from the master fd_sets; signal events are
 * delegated to evsignal_del().
 */
static int
select_del(void *arg, struct event *ev)
{
struct selectop *sop = arg;
check_selectop(sop);
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));
if (sop->event_fds < ev->ev_fd) {
check_selectop(sop);
return (0);
}
if (ev->ev_events & EV_READ) {
FD_CLR(ev->ev_fd, sop->event_readset_in);
sop->event_r_by_fd[ev->ev_fd] = NULL;
}
if (ev->ev_events & EV_WRITE) {
FD_CLR(ev->ev_fd, sop->event_writeset_in);
sop->event_w_by_fd[ev->ev_fd] = NULL;
}
check_selectop(sop);
return (0);
}
static void
select_dealloc(struct event_base *base, void *arg)
{
struct selectop *sop = arg;
evsignal_dealloc(base);
if (sop->event_readset_in)
free(sop->event_readset_in);
if (sop->event_writeset_in)
free(sop->event_writeset_in);
if (sop->event_readset_out)
free(sop->event_readset_out);
if (sop->event_writeset_out)
free(sop->event_writeset_out);
if (sop->event_r_by_fd)
free(sop->event_r_by_fd);
if (sop->event_w_by_fd)
free(sop->event_w_by_fd);
memset(sop, 0, sizeof(struct selectop));
free(sop);
}

View file

@ -0,0 +1,376 @@
/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <assert.h>
#include "event.h"
#include "event-internal.h"
#include "evsignal.h"
#include "evutil.h"
#include "log.h"
struct event_base *evsignal_base = NULL;
static void evsignal_handler(int sig);
#ifdef WIN32
#define error_is_eagain(err) \
((err) == EAGAIN || (err) == WSAEWOULDBLOCK)
#else
#define error_is_eagain(err) ((err) == EAGAIN)
#endif
/* Callback for when the signal handler writes a byte to our signaling socket */
static void
evsignal_cb(int fd, short what, void *arg)
{
static char signals[1];
#ifdef WIN32
SSIZE_T n;
#else
ssize_t n;
#endif
n = recv(fd, signals, sizeof(signals), 0);
if (n == -1) {
int err = EVUTIL_SOCKET_ERROR();
if (! error_is_eagain(err))
event_err(1, "%s: read", __func__);
}
}
#ifdef HAVE_SETFD
#define FD_CLOSEONEXEC(x) do { \
if (fcntl(x, F_SETFD, 1) == -1) \
event_warn("fcntl(%d, F_SETFD)", x); \
} while (0)
#else
#define FD_CLOSEONEXEC(x)
#endif
int
evsignal_init(struct event_base *base)
{
int i;
/*
* Our signal handler is going to write to one end of the socket
* pair to wake up our event loop. The event loop then scans for
* signals that got delivered.
*/
if (evutil_socketpair(
AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1) {
#ifdef WIN32
/* Make this nonfatal on win32, where sometimes people
have localhost firewalled. */
event_warn("%s: socketpair", __func__);
#else
event_err(1, "%s: socketpair", __func__);
#endif
return -1;
}
FD_CLOSEONEXEC(base->sig.ev_signal_pair[0]);
FD_CLOSEONEXEC(base->sig.ev_signal_pair[1]);
base->sig.sh_old = NULL;
base->sig.sh_old_max = 0;
base->sig.evsignal_caught = 0;
memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
/* initialize the queues for all events */
for (i = 0; i < NSIG; ++i)
TAILQ_INIT(&base->sig.evsigevents[i]);
evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);
evutil_make_socket_nonblocking(base->sig.ev_signal_pair[1]);
event_set(&base->sig.ev_signal, base->sig.ev_signal_pair[1],
EV_READ | EV_PERSIST, evsignal_cb, &base->sig.ev_signal);
base->sig.ev_signal.ev_base = base;
base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;
return 0;
}
/* Helper: set the signal handler for evsignal to handler in base, so that
* we can restore the original handler when we clear the current one. */
int
_evsignal_set_handler(struct event_base *base,
int evsignal, void (*handler)(int))
{
#ifdef HAVE_SIGACTION
struct sigaction sa;
#else
ev_sighandler_t sh;
#endif
struct evsignal_info *sig = &base->sig;
void *p;
/*
* Resize the saved-signal-handler array up to the highest signal number
* seen; a dynamic array keeps the memory footprint small.
*/
if (evsignal >= sig->sh_old_max) {
int new_max = evsignal + 1;
event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
__func__, evsignal, sig->sh_old_max));
p = realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
if (p == NULL) {
event_warn("realloc");
return (-1);
}
memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));
sig->sh_old_max = new_max;
sig->sh_old = p;
}
/* allocate space for previous handler out of dynamic array */
sig->sh_old[evsignal] = malloc(sizeof *sig->sh_old[evsignal]);
if (sig->sh_old[evsignal] == NULL) {
event_warn("malloc");
return (-1);
}
/* save previous handler and setup new handler */
#ifdef HAVE_SIGACTION
memset(&sa, 0, sizeof(sa));
sa.sa_handler = handler;
sa.sa_flags |= SA_RESTART;
sigfillset(&sa.sa_mask);
if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
event_warn("sigaction");
free(sig->sh_old[evsignal]);
sig->sh_old[evsignal] = NULL;
return (-1);
}
#else
if ((sh = signal(evsignal, handler)) == SIG_ERR) {
event_warn("signal");
free(sig->sh_old[evsignal]);
sig->sh_old[evsignal] = NULL;
return (-1);
}
*sig->sh_old[evsignal] = sh;
#endif
return (0);
}
int
evsignal_add(struct event *ev)
{
int evsignal;
struct event_base *base = ev->ev_base;
struct evsignal_info *sig = &ev->ev_base->sig;
if (ev->ev_events & (EV_READ|EV_WRITE))
event_errx(1, "%s: EV_SIGNAL incompatible use", __func__);
evsignal = EVENT_SIGNAL(ev);
assert(evsignal >= 0 && evsignal < NSIG);
if (TAILQ_EMPTY(&sig->evsigevents[evsignal])) {
event_debug(("%s: %p: changing signal handler", __func__, ev));
if (_evsignal_set_handler(
base, evsignal, evsignal_handler) == -1)
return (-1);
/* catch signals if they happen quickly */
evsignal_base = base;
if (!sig->ev_signal_added) {
if (event_add(&sig->ev_signal, NULL))
return (-1);
sig->ev_signal_added = 1;
}
}
/* multiple events may listen to the same signal */
TAILQ_INSERT_TAIL(&sig->evsigevents[evsignal], ev, ev_signal_next);
return (0);
}
int
_evsignal_restore_handler(struct event_base *base, int evsignal)
{
int ret = 0;
struct evsignal_info *sig = &base->sig;
#ifdef HAVE_SIGACTION
struct sigaction *sh;
#else
ev_sighandler_t *sh;
#endif
/* restore previous handler */
sh = sig->sh_old[evsignal];
sig->sh_old[evsignal] = NULL;
#ifdef HAVE_SIGACTION
if (sigaction(evsignal, sh, NULL) == -1) {
event_warn("sigaction");
ret = -1;
}
#else
if (signal(evsignal, *sh) == SIG_ERR) {
event_warn("signal");
ret = -1;
}
#endif
free(sh);
return ret;
}
int
evsignal_del(struct event *ev)
{
struct event_base *base = ev->ev_base;
struct evsignal_info *sig = &base->sig;
int evsignal = EVENT_SIGNAL(ev);
assert(evsignal >= 0 && evsignal < NSIG);
/* multiple events may listen to the same signal */
TAILQ_REMOVE(&sig->evsigevents[evsignal], ev, ev_signal_next);
if (!TAILQ_EMPTY(&sig->evsigevents[evsignal]))
return (0);
event_debug(("%s: %p: restoring signal handler", __func__, ev));
return (_evsignal_restore_handler(ev->ev_base, EVENT_SIGNAL(ev)));
}
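/* Runs in signal context: count the signal, raise evsignal_caught and write
 * one byte to the socketpair so a blocking poll/select wakes up and the loop
 * can call evsignal_process(). errno is saved and restored; without
 * sigaction() the handler also has to be reinstalled on every delivery. */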
static void
evsignal_handler(int sig)
{
int save_errno = errno;
if (evsignal_base == NULL) {
event_warn(
"%s: received signal %d, but have no base configured",
__func__, sig);
return;
}
evsignal_base->sig.evsigcaught[sig]++;
evsignal_base->sig.evsignal_caught = 1;
#ifndef HAVE_SIGACTION
signal(sig, evsignal_handler);
#endif
/* Wake up our notification mechanism */
send(evsignal_base->sig.ev_signal_pair[0], "a", 1, 0);
errno = save_errno;
}
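/* Called from the event loop once evsignal_caught is set: for every signal
 * with a nonzero count, activate each registered event with that count
 * (removing non-persistent events first) and subtract what was handled from
 * the counter. */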
void
evsignal_process(struct event_base *base)
{
struct evsignal_info *sig = &base->sig;
struct event *ev, *next_ev;
sig_atomic_t ncalls;
int i;
base->sig.evsignal_caught = 0;
for (i = 1; i < NSIG; ++i) {
ncalls = sig->evsigcaught[i];
if (ncalls == 0)
continue;
sig->evsigcaught[i] -= ncalls;
for (ev = TAILQ_FIRST(&sig->evsigevents[i]);
ev != NULL; ev = next_ev) {
next_ev = TAILQ_NEXT(ev, ev_signal_next);
if (!(ev->ev_events & EV_PERSIST))
event_del(ev);
event_active(ev, EV_SIGNAL, ncalls);
}
}
}
void
evsignal_dealloc(struct event_base *base)
{
int i = 0;
if (base->sig.ev_signal_added) {
event_del(&base->sig.ev_signal);
base->sig.ev_signal_added = 0;
}
for (i = 0; i < NSIG; ++i) {
if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
_evsignal_restore_handler(base, i);
}
if (base->sig.ev_signal_pair[0] != -1) {
EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
base->sig.ev_signal_pair[0] = -1;
}
if (base->sig.ev_signal_pair[1] != -1) {
EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
base->sig.ev_signal_pair[1] = -1;
}
base->sig.sh_old_max = 0;
/* the per-signal sh_old[] entries are freed in _evsignal_restore_handler() above */
if (base->sig.sh_old) {
free(base->sig.sh_old);
base->sig.sh_old = NULL;
}
}

View file

@ -0,0 +1 @@
timestamp

View file

@ -0,0 +1,21 @@
#ifndef _STRLCPY_INTERNAL_H_
#define _STRLCPY_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "config.h"
#ifndef HAVE_STRLCPY
#include <string.h>
size_t _event_strlcpy(char *dst, const char *src, size_t siz);
#define strlcpy _event_strlcpy
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,74 @@
/* $OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $ */
/*
* Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $";
#endif /* LIBC_SCCS and not lint */
#include <sys/types.h>
#include "config.h"
#ifndef HAVE_STRLCPY
#include "strlcpy-internal.h"
/*
* Copy src to string dst of size siz. At most siz-1 characters
* will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
*/
size_t
_event_strlcpy(char *dst, const char *src, size_t siz)
{
register char *d = dst;
register const char *s = src;
register size_t n = siz;
/* Copy as many bytes as will fit */
if (n != 0 && --n != 0) {
do {
if ((*d++ = *s++) == 0)
break;
} while (--n != 0);
}
/* Not enough room in dst, add NUL and traverse rest of src */
if (n == 0) {
if (siz != 0)
*d = '\0'; /* NUL-terminate dst */
while (*s++)
;
}
return(s - src - 1); /* count does not include NUL */
}
#endif