Diffstat (limited to 'lib/asio/detail/impl')
50 files changed, 15058 insertions, 0 deletions
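A recurring convention in the descriptor_ops.ipp diff below: every POSIX call is made with errno cleared first, routed through an error_wrapper helper that captures errno into an asio::error_code, and ec is reset on success. A minimal sketch of that helper, assuming the shape it has elsewhere in the library's sources (the exact definition is not part of this diff and may differ):

template <typename ReturnType>
inline ReturnType error_wrapper(ReturnType return_value,
    asio::error_code& ec)
{
  // Capture whatever errno the preceding system call left behind.
  ec = asio::error_code(errno, asio::error::get_system_category());
  return return_value;
}

// Typical call site, as in descriptor_ops::open() below:
//   errno = 0;
//   int result = error_wrapper(::open(path, flags), ec);
//   if (result >= 0)
//     ec = asio::error_code();  // success: clear any stale error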
diff --git a/lib/asio/detail/impl/buffer_sequence_adapter.ipp b/lib/asio/detail/impl/buffer_sequence_adapter.ipp new file mode 100644 index 0000000..323c8ad --- /dev/null +++ b/lib/asio/detail/impl/buffer_sequence_adapter.ipp @@ -0,0 +1,118 @@ +// +// detail/impl/buffer_sequence_adapter.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP +#define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS_RUNTIME) + +#include <robuffer.h> +#include <windows.storage.streams.h> +#include <wrl/implements.h> +#include "asio/detail/buffer_sequence_adapter.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +class winrt_buffer_impl : + public Microsoft::WRL::RuntimeClass< + Microsoft::WRL::RuntimeClassFlags< + Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>, + ABI::Windows::Storage::Streams::IBuffer, + Windows::Storage::Streams::IBufferByteAccess> +{ +public: + explicit winrt_buffer_impl(const asio::const_buffer& b) + { + bytes_ = const_cast<byte*>(static_cast<const byte*>(b.data())); + length_ = b.size(); + capacity_ = b.size(); + } + + explicit winrt_buffer_impl(const asio::mutable_buffer& b) + { + bytes_ = static_cast<byte*>(b.data()); + length_ = 0; + capacity_ = b.size(); + } + + ~winrt_buffer_impl() + { + } + + STDMETHODIMP Buffer(byte** value) + { + *value = bytes_; + return S_OK; + } + + STDMETHODIMP get_Capacity(UINT32* value) + { + *value = capacity_; + return S_OK; + } + + STDMETHODIMP get_Length(UINT32 *value) + { + *value = length_; + return S_OK; + } + + STDMETHODIMP put_Length(UINT32 value) + { + if (value > capacity_) + return E_INVALIDARG; + length_ = value; + return S_OK; + } + +private: + byte* bytes_; + UINT32 length_; + UINT32 capacity_; +}; + +void buffer_sequence_adapter_base::init_native_buffer( + buffer_sequence_adapter_base::native_buffer_type& buf, + const asio::mutable_buffer& buffer) +{ + std::memset(&buf, 0, sizeof(native_buffer_type)); + Microsoft::WRL::ComPtr<IInspectable> insp + = Microsoft::WRL::Make<winrt_buffer_impl>(buffer); + buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get()); +} + +void buffer_sequence_adapter_base::init_native_buffer( + buffer_sequence_adapter_base::native_buffer_type& buf, + const asio::const_buffer& buffer) +{ + std::memset(&buf, 0, sizeof(native_buffer_type)); + Microsoft::WRL::ComPtr<IInspectable> insp + = Microsoft::WRL::Make<winrt_buffer_impl>(buffer); + Platform::Object^ buf_obj = reinterpret_cast<Platform::Object^>(insp.Get()); + buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get()); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP diff --git a/lib/asio/detail/impl/descriptor_ops.ipp b/lib/asio/detail/impl/descriptor_ops.ipp new file mode 100644 index 0000000..1af643f --- /dev/null +++ b/lib/asio/detail/impl/descriptor_ops.ipp @@ -0,0 +1,474 @@ +// +// detail/impl/descriptor_ops.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 
Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP +#define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" +#include <cerrno> +#include "asio/detail/descriptor_ops.hpp" +#include "asio/error.hpp" + +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { +namespace descriptor_ops { + +int open(const char* path, int flags, asio::error_code& ec) +{ + errno = 0; + int result = error_wrapper(::open(path, flags), ec); + if (result >= 0) + ec = asio::error_code(); + return result; +} + +int close(int d, state_type& state, asio::error_code& ec) +{ + int result = 0; + if (d != -1) + { + errno = 0; + result = error_wrapper(::close(d), ec); + + if (result != 0 + && (ec == asio::error::would_block + || ec == asio::error::try_again)) + { + // According to UNIX Network Programming Vol. 1, it is possible for + // close() to fail with EWOULDBLOCK under certain circumstances. What + // isn't clear is the state of the descriptor after this error. The one + // current OS where this behaviour is seen, Windows, says that the socket + // remains open. Therefore we'll put the descriptor back into blocking + // mode and have another attempt at closing it. +#if defined(__SYMBIAN32__) + int flags = ::fcntl(d, F_GETFL, 0); + if (flags >= 0) + ::fcntl(d, F_SETFL, flags & ~O_NONBLOCK); +#else // defined(__SYMBIAN32__) + ioctl_arg_type arg = 0; + ::ioctl(d, FIONBIO, &arg); +#endif // defined(__SYMBIAN32__) + state &= ~non_blocking; + + errno = 0; + result = error_wrapper(::close(d), ec); + } + } + + if (result == 0) + ec = asio::error_code(); + return result; +} + +bool set_user_non_blocking(int d, state_type& state, + bool value, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return false; + } + + errno = 0; +#if defined(__SYMBIAN32__) + int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); + if (result >= 0) + { + errno = 0; + int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); + result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); + } +#else // defined(__SYMBIAN32__) + ioctl_arg_type arg = (value ? 1 : 0); + int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); +#endif // defined(__SYMBIAN32__) + + if (result >= 0) + { + ec = asio::error_code(); + if (value) + state |= user_set_non_blocking; + else + { + // Clearing the user-set non-blocking mode always overrides any + // internally-set non-blocking flag. Any subsequent asynchronous + // operations will need to re-enable non-blocking I/O. + state &= ~(user_set_non_blocking | internal_non_blocking); + } + return true; + } + + return false; +} + +bool set_internal_non_blocking(int d, state_type& state, + bool value, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return false; + } + + if (!value && (state & user_set_non_blocking)) + { + // It does not make sense to clear the internal non-blocking flag if the + // user still wants non-blocking behaviour. Return an error and let the + // caller figure out whether to update the user-set non-blocking flag. 
+ ec = asio::error::invalid_argument; + return false; + } + + errno = 0; +#if defined(__SYMBIAN32__) + int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); + if (result >= 0) + { + errno = 0; + int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); + result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); + } +#else // defined(__SYMBIAN32__) + ioctl_arg_type arg = (value ? 1 : 0); + int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); +#endif // defined(__SYMBIAN32__) + + if (result >= 0) + { + ec = asio::error_code(); + if (value) + state |= internal_non_blocking; + else + state &= ~internal_non_blocking; + return true; + } + + return false; +} + +std::size_t sync_read(int d, state_type state, buf* bufs, + std::size_t count, bool all_empty, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // A request to read 0 bytes on a stream is a no-op. + if (all_empty) + { + ec = asio::error_code(); + return 0; + } + + // Read some data. + for (;;) + { + // Try to complete the operation without blocking. + errno = 0; + signed_size_type bytes = error_wrapper(::readv( + d, bufs, static_cast<int>(count)), ec); + + // Check if operation succeeded. + if (bytes > 0) + return bytes; + + // Check for EOF. + if (bytes == 0) + { + ec = asio::error::eof; + return 0; + } + + // Operation failed. + if ((state & user_set_non_blocking) + || (ec != asio::error::would_block + && ec != asio::error::try_again)) + return 0; + + // Wait for descriptor to become ready. + if (descriptor_ops::poll_read(d, 0, ec) < 0) + return 0; + } +} + +bool non_blocking_read(int d, buf* bufs, std::size_t count, + asio::error_code& ec, std::size_t& bytes_transferred) +{ + for (;;) + { + // Read some data. + errno = 0; + signed_size_type bytes = error_wrapper(::readv( + d, bufs, static_cast<int>(count)), ec); + + // Check for end of stream. + if (bytes == 0) + { + ec = asio::error::eof; + return true; + } + + // Retry operation if interrupted by signal. + if (ec == asio::error::interrupted) + continue; + + // Check if we need to run the operation again. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + return false; + + // Operation is complete. + if (bytes > 0) + { + ec = asio::error_code(); + bytes_transferred = bytes; + } + else + bytes_transferred = 0; + + return true; + } +} + +std::size_t sync_write(int d, state_type state, const buf* bufs, + std::size_t count, bool all_empty, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // A request to write 0 bytes on a stream is a no-op. + if (all_empty) + { + ec = asio::error_code(); + return 0; + } + + // Write some data. + for (;;) + { + // Try to complete the operation without blocking. + errno = 0; + signed_size_type bytes = error_wrapper(::writev( + d, bufs, static_cast<int>(count)), ec); + + // Check if operation succeeded. + if (bytes > 0) + return bytes; + + // Operation failed. + if ((state & user_set_non_blocking) + || (ec != asio::error::would_block + && ec != asio::error::try_again)) + return 0; + + // Wait for descriptor to become ready. + if (descriptor_ops::poll_write(d, 0, ec) < 0) + return 0; + } +} + +bool non_blocking_write(int d, const buf* bufs, std::size_t count, + asio::error_code& ec, std::size_t& bytes_transferred) +{ + for (;;) + { + // Write some data. + errno = 0; + signed_size_type bytes = error_wrapper(::writev( + d, bufs, static_cast<int>(count)), ec); + + // Retry operation if interrupted by signal. 
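// (Editorial sketch, not part of the original file: the calling convention
// shared by non_blocking_read/non_blocking_write. A return of false means
// the call saw EWOULDBLOCK/EAGAIN and must be retried once the reactor
// reports readiness; true means the operation finished, successfully or
// not. Names prefixed my_ are illustrative assumptions only:
//
//   asio::error_code ec;
//   std::size_t n = 0;
//   if (descriptor_ops::non_blocking_write(fd, bufs, count, ec, n))
//     my_complete_handler(ec, n);   // done, possibly with an error
//   else
//     my_register_for_write(fd);    // not ready: run again on writability
// )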
+ if (ec == asio::error::interrupted) + continue; + + // Check if we need to run the operation again. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + return false; + + // Operation is complete. + if (bytes >= 0) + { + ec = asio::error_code(); + bytes_transferred = bytes; + } + else + bytes_transferred = 0; + + return true; + } +} + +int ioctl(int d, state_type& state, long cmd, + ioctl_arg_type* arg, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return -1; + } + + errno = 0; + int result = error_wrapper(::ioctl(d, cmd, arg), ec); + + if (result >= 0) + { + ec = asio::error_code(); + + // When updating the non-blocking mode we always perform the ioctl syscall, + // even if the flags would otherwise indicate that the descriptor is + // already in the correct state. This ensures that the underlying + // descriptor is put into the state that has been requested by the user. If + // the ioctl syscall was successful then we need to update the flags to + // match. + if (cmd == static_cast<long>(FIONBIO)) + { + if (*arg) + { + state |= user_set_non_blocking; + } + else + { + // Clearing the non-blocking mode always overrides any internally-set + // non-blocking flag. Any subsequent asynchronous operations will need + // to re-enable non-blocking I/O. + state &= ~(user_set_non_blocking | internal_non_blocking); + } + } + } + + return result; +} + +int fcntl(int d, int cmd, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return -1; + } + + errno = 0; + int result = error_wrapper(::fcntl(d, cmd), ec); + if (result != -1) + ec = asio::error_code(); + return result; +} + +int fcntl(int d, int cmd, long arg, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return -1; + } + + errno = 0; + int result = error_wrapper(::fcntl(d, cmd, arg), ec); + if (result != -1) + ec = asio::error_code(); + return result; +} + +int poll_read(int d, state_type state, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return -1; + } + + pollfd fds; + fds.fd = d; + fds.events = POLLIN; + fds.revents = 0; + int timeout = (state & user_set_non_blocking) ? 0 : -1; + errno = 0; + int result = error_wrapper(::poll(&fds, 1, timeout), ec); + if (result == 0) + ec = (state & user_set_non_blocking) + ? asio::error::would_block : asio::error_code(); + else if (result > 0) + ec = asio::error_code(); + return result; +} + +int poll_write(int d, state_type state, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return -1; + } + + pollfd fds; + fds.fd = d; + fds.events = POLLOUT; + fds.revents = 0; + int timeout = (state & user_set_non_blocking) ? 0 : -1; + errno = 0; + int result = error_wrapper(::poll(&fds, 1, timeout), ec); + if (result == 0) + ec = (state & user_set_non_blocking) + ? asio::error::would_block : asio::error_code(); + else if (result > 0) + ec = asio::error_code(); + return result; +} + +int poll_error(int d, state_type state, asio::error_code& ec) +{ + if (d == -1) + { + ec = asio::error::bad_descriptor; + return -1; + } + + pollfd fds; + fds.fd = d; + fds.events = POLLPRI | POLLERR | POLLHUP; + fds.revents = 0; + int timeout = (state & user_set_non_blocking) ? 0 : -1; + errno = 0; + int result = error_wrapper(::poll(&fds, 1, timeout), ec); + if (result == 0) + ec = (state & user_set_non_blocking) + ? 
asio::error::would_block : asio::error_code(); + else if (result > 0) + ec = asio::error_code(); + return result; +} + +} // namespace descriptor_ops +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + +#endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP diff --git a/lib/asio/detail/impl/dev_poll_reactor.hpp b/lib/asio/detail/impl/dev_poll_reactor.hpp new file mode 100644 index 0000000..4cd8aaf --- /dev/null +++ b/lib/asio/detail/impl/dev_poll_reactor.hpp @@ -0,0 +1,91 @@ +// +// detail/impl/dev_poll_reactor.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP +#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_DEV_POLL) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Time_Traits> +void dev_poll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_add_timer_queue(queue); +} + +template <typename Time_Traits> +void dev_poll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_remove_timer_queue(queue); +} + +template <typename Time_Traits> +void dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue, + const typename Time_Traits::time_type& time, + typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + scheduler_.post_immediate_completion(op, false); + return; + } + + bool earliest = queue.enqueue_timer(time, timer, op); + scheduler_.work_started(); + if (earliest) + interrupter_.interrupt(); +} + +template <typename Time_Traits> +std::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& timer, + std::size_t max_cancelled) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); + lock.unlock(); + scheduler_.post_deferred_completions(ops); + return n; +} + +template <typename Time_Traits> +void dev_poll_reactor::move_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& target, + typename timer_queue<Time_Traits>::per_timer_data& source) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + queue.cancel_timer(target, ops); + queue.move_timer(target, source); + lock.unlock(); + scheduler_.post_deferred_completions(ops); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_DEV_POLL) + +#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP diff --git a/lib/asio/detail/impl/dev_poll_reactor.ipp b/lib/asio/detail/impl/dev_poll_reactor.ipp new file mode 100644 index 0000000..1ca376c --- /dev/null +++ b/lib/asio/detail/impl/dev_poll_reactor.ipp @@ -0,0 +1,446 @@ +// +// detail/impl/dev_poll_reactor.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP +#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_DEV_POLL) + +#include "asio/detail/dev_poll_reactor.hpp" +#include "asio/detail/assert.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +dev_poll_reactor::dev_poll_reactor(asio::execution_context& ctx) + : asio::detail::execution_context_service_base<dev_poll_reactor>(ctx), + scheduler_(use_service<scheduler>(ctx)), + mutex_(), + dev_poll_fd_(do_dev_poll_create()), + interrupter_(), + shutdown_(false) +{ + // Add the interrupter's descriptor to /dev/poll. + ::pollfd ev = { 0, 0, 0 }; + ev.fd = interrupter_.read_descriptor(); + ev.events = POLLIN | POLLERR; + ev.revents = 0; + ::write(dev_poll_fd_, &ev, sizeof(ev)); +} + +dev_poll_reactor::~dev_poll_reactor() +{ + shutdown(); + ::close(dev_poll_fd_); +} + +void dev_poll_reactor::shutdown() +{ + asio::detail::mutex::scoped_lock lock(mutex_); + shutdown_ = true; + lock.unlock(); + + op_queue<operation> ops; + + for (int i = 0; i < max_ops; ++i) + op_queue_[i].get_all_operations(ops); + + timer_queues_.get_all_timers(ops); + + scheduler_.abandon_operations(ops); +} + +void dev_poll_reactor::notify_fork( + asio::execution_context::fork_event fork_ev) +{ + if (fork_ev == asio::execution_context::fork_child) + { + detail::mutex::scoped_lock lock(mutex_); + + if (dev_poll_fd_ != -1) + ::close(dev_poll_fd_); + dev_poll_fd_ = -1; + dev_poll_fd_ = do_dev_poll_create(); + + interrupter_.recreate(); + + // Add the interrupter's descriptor to /dev/poll. + ::pollfd ev = { 0, 0, 0 }; + ev.fd = interrupter_.read_descriptor(); + ev.events = POLLIN | POLLERR; + ev.revents = 0; + ::write(dev_poll_fd_, &ev, sizeof(ev)); + + // Re-register all descriptors with /dev/poll. The changes will be written + // to the /dev/poll descriptor the next time the reactor is run. 
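// (Editorial sketch, not part of the original file: the Solaris /dev/poll
// protocol this reactor relies on. Interest is registered by write()ing
// pollfd records to the /dev/poll descriptor -- successive writes merge
// event bits for a descriptor -- and POLLREMOVE drops it from the interest
// set entirely:
//
//   ::pollfd ev = { 0, 0, 0 };
//   ev.fd = fd;
//   ev.events = POLLIN | POLLOUT;            // add/merge interest
//   ::write(dev_poll_fd, &ev, sizeof(ev));
//   ev.events = POLLREMOVE;                  // forget fd entirely
//   ::write(dev_poll_fd, &ev, sizeof(ev));
// )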
+ for (int i = 0; i < max_ops; ++i) + { + reactor_op_queue<socket_type>::iterator iter = op_queue_[i].begin(); + reactor_op_queue<socket_type>::iterator end = op_queue_[i].end(); + for (; iter != end; ++iter) + { + ::pollfd& pending_ev = add_pending_event_change(iter->first); + pending_ev.events |= POLLERR | POLLHUP; + switch (i) + { + case read_op: pending_ev.events |= POLLIN; break; + case write_op: pending_ev.events |= POLLOUT; break; + case except_op: pending_ev.events |= POLLPRI; break; + default: break; + } + } + } + interrupter_.interrupt(); + } +} + +void dev_poll_reactor::init_task() +{ + scheduler_.init_task(); +} + +int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&) +{ + return 0; +} + +int dev_poll_reactor::register_internal_descriptor(int op_type, + socket_type descriptor, per_descriptor_data&, reactor_op* op) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + op_queue_[op_type].enqueue_operation(descriptor, op); + ::pollfd& ev = add_pending_event_change(descriptor); + ev.events = POLLERR | POLLHUP; + switch (op_type) + { + case read_op: ev.events |= POLLIN; break; + case write_op: ev.events |= POLLOUT; break; + case except_op: ev.events |= POLLPRI; break; + default: break; + } + interrupter_.interrupt(); + + return 0; +} + +void dev_poll_reactor::move_descriptor(socket_type, + dev_poll_reactor::per_descriptor_data&, + dev_poll_reactor::per_descriptor_data&) +{ +} + +void dev_poll_reactor::start_op(int op_type, socket_type descriptor, + dev_poll_reactor::per_descriptor_data&, reactor_op* op, + bool is_continuation, bool allow_speculative) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + post_immediate_completion(op, is_continuation); + return; + } + + if (allow_speculative) + { + if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor)) + { + if (!op_queue_[op_type].has_operation(descriptor)) + { + if (op->perform()) + { + lock.unlock(); + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + } + } + } + + bool first = op_queue_[op_type].enqueue_operation(descriptor, op); + scheduler_.work_started(); + if (first) + { + ::pollfd& ev = add_pending_event_change(descriptor); + ev.events = POLLERR | POLLHUP; + if (op_type == read_op + || op_queue_[read_op].has_operation(descriptor)) + ev.events |= POLLIN; + if (op_type == write_op + || op_queue_[write_op].has_operation(descriptor)) + ev.events |= POLLOUT; + if (op_type == except_op + || op_queue_[except_op].has_operation(descriptor)) + ev.events |= POLLPRI; + interrupter_.interrupt(); + } +} + +void dev_poll_reactor::cancel_ops(socket_type descriptor, + dev_poll_reactor::per_descriptor_data&) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + cancel_ops_unlocked(descriptor, asio::error::operation_aborted); +} + +void dev_poll_reactor::deregister_descriptor(socket_type descriptor, + dev_poll_reactor::per_descriptor_data&, bool) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + // Remove the descriptor from /dev/poll. + ::pollfd& ev = add_pending_event_change(descriptor); + ev.events = POLLREMOVE; + interrupter_.interrupt(); + + // Cancel any outstanding operations associated with the descriptor. + cancel_ops_unlocked(descriptor, asio::error::operation_aborted); +} + +void dev_poll_reactor::deregister_internal_descriptor( + socket_type descriptor, dev_poll_reactor::per_descriptor_data&) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + // Remove the descriptor from /dev/poll. 
Since this function is only called + // during a fork, we can apply the change immediately. + ::pollfd ev = { 0, 0, 0 }; + ev.fd = descriptor; + ev.events = POLLREMOVE; + ev.revents = 0; + ::write(dev_poll_fd_, &ev, sizeof(ev)); + + // Destroy all operations associated with the descriptor. + op_queue<operation> ops; + asio::error_code ec; + for (int i = 0; i < max_ops; ++i) + op_queue_[i].cancel_operations(descriptor, ops, ec); +} + +void dev_poll_reactor::cleanup_descriptor_data( + dev_poll_reactor::per_descriptor_data&) +{ +} + +void dev_poll_reactor::run(long usec, op_queue<operation>& ops) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + // We can return immediately if there's no work to do and the reactor is + // not supposed to block. + if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty() + && op_queue_[except_op].empty() && timer_queues_.all_empty()) + return; + + // Write the pending event registration changes to the /dev/poll descriptor. + std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size(); + if (events_size > 0) + { + errno = 0; + int result = ::write(dev_poll_fd_, + &pending_event_changes_[0], events_size); + if (result != static_cast<int>(events_size)) + { + asio::error_code ec = asio::error_code( + errno, asio::error::get_system_category()); + for (std::size_t i = 0; i < pending_event_changes_.size(); ++i) + { + int descriptor = pending_event_changes_[i].fd; + for (int j = 0; j < max_ops; ++j) + op_queue_[j].cancel_operations(descriptor, ops, ec); + } + } + pending_event_changes_.clear(); + pending_event_change_index_.clear(); + } + + // Calculate timeout. + int timeout; + if (usec == 0) + timeout = 0; + else + { + timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1); + timeout = get_timeout(timeout); + } + lock.unlock(); + + // Block on the /dev/poll descriptor. + ::pollfd events[128] = { { 0, 0, 0 } }; + ::dvpoll dp = { 0, 0, 0 }; + dp.dp_fds = events; + dp.dp_nfds = 128; + dp.dp_timeout = timeout; + int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp); + + lock.lock(); + + // Dispatch the waiting events. + for (int i = 0; i < num_events; ++i) + { + int descriptor = events[i].fd; + if (descriptor == interrupter_.read_descriptor()) + { + interrupter_.reset(); + } + else + { + bool more_reads = false; + bool more_writes = false; + bool more_except = false; + + // Exception operations must be processed first to ensure that any + // out-of-band data is read before normal data. + if (events[i].events & (POLLPRI | POLLERR | POLLHUP)) + more_except = + op_queue_[except_op].perform_operations(descriptor, ops); + else + more_except = op_queue_[except_op].has_operation(descriptor); + + if (events[i].events & (POLLIN | POLLERR | POLLHUP)) + more_reads = op_queue_[read_op].perform_operations(descriptor, ops); + else + more_reads = op_queue_[read_op].has_operation(descriptor); + + if (events[i].events & (POLLOUT | POLLERR | POLLHUP)) + more_writes = op_queue_[write_op].perform_operations(descriptor, ops); + else + more_writes = op_queue_[write_op].has_operation(descriptor); + + if ((events[i].events & (POLLERR | POLLHUP)) != 0 + && !more_except && !more_reads && !more_writes) + { + // If we have an event and no operations associated with the + // descriptor then we need to delete the descriptor from /dev/poll. + // The poll operation can produce POLLHUP or POLLERR events when there + // is no operation pending, so if we do not remove the descriptor we + // can end up in a tight polling loop. 
+ ::pollfd ev = { 0, 0, 0 }; + ev.fd = descriptor; + ev.events = POLLREMOVE; + ev.revents = 0; + ::write(dev_poll_fd_, &ev, sizeof(ev)); + } + else + { + ::pollfd ev = { 0, 0, 0 }; + ev.fd = descriptor; + ev.events = POLLERR | POLLHUP; + if (more_reads) + ev.events |= POLLIN; + if (more_writes) + ev.events |= POLLOUT; + if (more_except) + ev.events |= POLLPRI; + ev.revents = 0; + int result = ::write(dev_poll_fd_, &ev, sizeof(ev)); + if (result != sizeof(ev)) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + for (int j = 0; j < max_ops; ++j) + op_queue_[j].cancel_operations(descriptor, ops, ec); + } + } + } + } + timer_queues_.get_ready_timers(ops); +} + +void dev_poll_reactor::interrupt() +{ + interrupter_.interrupt(); +} + +int dev_poll_reactor::do_dev_poll_create() +{ + int fd = ::open("/dev/poll", O_RDWR); + if (fd == -1) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "/dev/poll"); + } + return fd; +} + +void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.insert(&queue); +} + +void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.erase(&queue); +} + +int dev_poll_reactor::get_timeout(int msec) +{ + // By default we will wait no longer than 5 minutes. This will ensure that + // any changes to the system clock are detected after no longer than this. + const int max_msec = 5 * 60 * 1000; + return timer_queues_.wait_duration_msec( + (msec < 0 || max_msec < msec) ? max_msec : msec); +} + +void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor, + const asio::error_code& ec) +{ + bool need_interrupt = false; + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + need_interrupt = op_queue_[i].cancel_operations( + descriptor, ops, ec) || need_interrupt; + scheduler_.post_deferred_completions(ops); + if (need_interrupt) + interrupter_.interrupt(); +} + +::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor) +{ + hash_map<int, std::size_t>::iterator iter + = pending_event_change_index_.find(descriptor); + if (iter == pending_event_change_index_.end()) + { + std::size_t index = pending_event_changes_.size(); + pending_event_changes_.reserve(pending_event_changes_.size() + 1); + pending_event_change_index_.insert(std::make_pair(descriptor, index)); + pending_event_changes_.push_back(::pollfd()); + pending_event_changes_[index].fd = descriptor; + pending_event_changes_[index].revents = 0; + return pending_event_changes_[index]; + } + else + { + return pending_event_changes_[iter->second]; + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_DEV_POLL) + +#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP diff --git a/lib/asio/detail/impl/epoll_reactor.hpp b/lib/asio/detail/impl/epoll_reactor.hpp new file mode 100644 index 0000000..f990059 --- /dev/null +++ b/lib/asio/detail/impl/epoll_reactor.hpp @@ -0,0 +1,89 @@ +// +// detail/impl/epoll_reactor.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP +#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#if defined(ASIO_HAS_EPOLL) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Time_Traits> +void epoll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_add_timer_queue(queue); +} + +template <typename Time_Traits> +void epoll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_remove_timer_queue(queue); +} + +template <typename Time_Traits> +void epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue, + const typename Time_Traits::time_type& time, + typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op) +{ + mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + scheduler_.post_immediate_completion(op, false); + return; + } + + bool earliest = queue.enqueue_timer(time, timer, op); + scheduler_.work_started(); + if (earliest) + update_timeout(); +} + +template <typename Time_Traits> +std::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& timer, + std::size_t max_cancelled) +{ + mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); + lock.unlock(); + scheduler_.post_deferred_completions(ops); + return n; +} + +template <typename Time_Traits> +void epoll_reactor::move_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& target, + typename timer_queue<Time_Traits>::per_timer_data& source) +{ + mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + queue.cancel_timer(target, ops); + queue.move_timer(target, source); + lock.unlock(); + scheduler_.post_deferred_completions(ops); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_EPOLL) + +#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP diff --git a/lib/asio/detail/impl/epoll_reactor.ipp b/lib/asio/detail/impl/epoll_reactor.ipp new file mode 100644 index 0000000..65584a4 --- /dev/null +++ b/lib/asio/detail/impl/epoll_reactor.ipp @@ -0,0 +1,787 @@ +// +// detail/impl/epoll_reactor.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP +#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_EPOLL) + +#include <cstddef> +#include <sys/epoll.h> +#include "asio/detail/epoll_reactor.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#if defined(ASIO_HAS_TIMERFD) +# include <sys/timerfd.h> +#endif // defined(ASIO_HAS_TIMERFD) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +epoll_reactor::epoll_reactor(asio::execution_context& ctx) + : execution_context_service_base<epoll_reactor>(ctx), + scheduler_(use_service<scheduler>(ctx)), + mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING( + REACTOR_REGISTRATION, scheduler_.concurrency_hint())), + interrupter_(), + epoll_fd_(do_epoll_create()), + timer_fd_(do_timerfd_create()), + shutdown_(false), + registered_descriptors_mutex_(mutex_.enabled()) +{ + // Add the interrupter's descriptor to epoll. + epoll_event ev = { 0, { 0 } }; + ev.events = EPOLLIN | EPOLLERR | EPOLLET; + ev.data.ptr = &interrupter_; + epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); + interrupter_.interrupt(); + + // Add the timer descriptor to epoll. + if (timer_fd_ != -1) + { + ev.events = EPOLLIN | EPOLLERR; + ev.data.ptr = &timer_fd_; + epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); + } +} + +epoll_reactor::~epoll_reactor() +{ + if (epoll_fd_ != -1) + close(epoll_fd_); + if (timer_fd_ != -1) + close(timer_fd_); +} + +void epoll_reactor::shutdown() +{ + mutex::scoped_lock lock(mutex_); + shutdown_ = true; + lock.unlock(); + + op_queue<operation> ops; + + while (descriptor_state* state = registered_descriptors_.first()) + { + for (int i = 0; i < max_ops; ++i) + ops.push(state->op_queue_[i]); + state->shutdown_ = true; + registered_descriptors_.free(state); + } + + timer_queues_.get_all_timers(ops); + + scheduler_.abandon_operations(ops); +} + +void epoll_reactor::notify_fork( + asio::execution_context::fork_event fork_ev) +{ + if (fork_ev == asio::execution_context::fork_child) + { + if (epoll_fd_ != -1) + ::close(epoll_fd_); + epoll_fd_ = -1; + epoll_fd_ = do_epoll_create(); + + if (timer_fd_ != -1) + ::close(timer_fd_); + timer_fd_ = -1; + timer_fd_ = do_timerfd_create(); + + interrupter_.recreate(); + + // Add the interrupter's descriptor to epoll. + epoll_event ev = { 0, { 0 } }; + ev.events = EPOLLIN | EPOLLERR | EPOLLET; + ev.data.ptr = &interrupter_; + epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); + interrupter_.interrupt(); + + // Add the timer descriptor to epoll. + if (timer_fd_ != -1) + { + ev.events = EPOLLIN | EPOLLERR; + ev.data.ptr = &timer_fd_; + epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); + } + + update_timeout(); + + // Re-register all descriptors with epoll. 
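// (Editorial sketch, not part of the original file: the edge-triggered
// contract implied by the EPOLLET registrations used throughout this file.
// With EPOLLET, epoll_wait() reports a descriptor only when its readiness
// changes, so a consumer must keep reading or writing until the syscall
// fails with EAGAIN/EWOULDBLOCK before waiting again:
//
//   for (;;)
//   {
//     ssize_t n = ::read(fd, buf, sizeof(buf));
//     if (n > 0) continue;                     // consume n bytes, go again
//     if (n < 0 && errno == EINTR) continue;   // retry on signal
//     if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
//       break;                                 // drained: safe to epoll_wait
//     break;                                   // EOF or hard error
//   }
// )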
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); + for (descriptor_state* state = registered_descriptors_.first(); + state != 0; state = state->next_) + { + ev.events = state->registered_events_; + ev.data.ptr = state; + int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev); + if (result != 0) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "epoll re-registration"); + } + } + } +} + +void epoll_reactor::init_task() +{ + scheduler_.init_task(); +} + +int epoll_reactor::register_descriptor(socket_type descriptor, + epoll_reactor::per_descriptor_data& descriptor_data) +{ + descriptor_data = allocate_descriptor_state(); + + ASIO_HANDLER_REACTOR_REGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + { + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + descriptor_data->reactor_ = this; + descriptor_data->descriptor_ = descriptor; + descriptor_data->shutdown_ = false; + for (int i = 0; i < max_ops; ++i) + descriptor_data->try_speculative_[i] = true; + } + + epoll_event ev = { 0, { 0 } }; + ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; + descriptor_data->registered_events_ = ev.events; + ev.data.ptr = descriptor_data; + int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); + if (result != 0) + { + if (errno == EPERM) + { + // This file descriptor type is not supported by epoll. However, if it is + // a regular file then operations on it will not block. We will allow + // this descriptor to be used and fail later if an operation on it would + // otherwise require a trip through the reactor. + descriptor_data->registered_events_ = 0; + return 0; + } + return errno; + } + + return 0; +} + +int epoll_reactor::register_internal_descriptor( + int op_type, socket_type descriptor, + epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op) +{ + descriptor_data = allocate_descriptor_state(); + + ASIO_HANDLER_REACTOR_REGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + { + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + descriptor_data->reactor_ = this; + descriptor_data->descriptor_ = descriptor; + descriptor_data->shutdown_ = false; + descriptor_data->op_queue_[op_type].push(op); + for (int i = 0; i < max_ops; ++i) + descriptor_data->try_speculative_[i] = true; + } + + epoll_event ev = { 0, { 0 } }; + ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; + descriptor_data->registered_events_ = ev.events; + ev.data.ptr = descriptor_data; + int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); + if (result != 0) + return errno; + + return 0; +} + +void epoll_reactor::move_descriptor(socket_type, + epoll_reactor::per_descriptor_data& target_descriptor_data, + epoll_reactor::per_descriptor_data& source_descriptor_data) +{ + target_descriptor_data = source_descriptor_data; + source_descriptor_data = 0; +} + +void epoll_reactor::start_op(int op_type, socket_type descriptor, + epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op, + bool is_continuation, bool allow_speculative) +{ + if (!descriptor_data) + { + op->ec_ = asio::error::bad_descriptor; + post_immediate_completion(op, is_continuation); + return; + } + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (descriptor_data->shutdown_) + { + post_immediate_completion(op, is_continuation); + return; + } + + 
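// (Editorial sketch, not part of the original file: the idea behind the
// speculative path that follows in start_op. Before parking an operation in
// the reactor, it tries op->perform() once; for a descriptor that is
// already ready this completes the I/O immediately and avoids a full
// epoll round trip. Conceptually, with illustrative helper names:
//
//   if (queue_for(op_type).empty() && allow_speculative)
//     if (op->perform())                  // attempt the syscall right now
//       return post_completion(op);       // already ready: done, no epoll
//   queue_for(op_type).push(op);          // not ready: wait for an event
// )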
if (descriptor_data->op_queue_[op_type].empty()) + { + if (allow_speculative + && (op_type != read_op + || descriptor_data->op_queue_[except_op].empty())) + { + if (descriptor_data->try_speculative_[op_type]) + { + if (reactor_op::status status = op->perform()) + { + if (status == reactor_op::done_and_exhausted) + if (descriptor_data->registered_events_ != 0) + descriptor_data->try_speculative_[op_type] = false; + descriptor_lock.unlock(); + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + } + + if (descriptor_data->registered_events_ == 0) + { + op->ec_ = asio::error::operation_not_supported; + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + + if (op_type == write_op) + { + if ((descriptor_data->registered_events_ & EPOLLOUT) == 0) + { + epoll_event ev = { 0, { 0 } }; + ev.events = descriptor_data->registered_events_ | EPOLLOUT; + ev.data.ptr = descriptor_data; + if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0) + { + descriptor_data->registered_events_ |= ev.events; + } + else + { + op->ec_ = asio::error_code(errno, + asio::error::get_system_category()); + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + } + } + } + else if (descriptor_data->registered_events_ == 0) + { + op->ec_ = asio::error::operation_not_supported; + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + else + { + if (op_type == write_op) + { + descriptor_data->registered_events_ |= EPOLLOUT; + } + + epoll_event ev = { 0, { 0 } }; + ev.events = descriptor_data->registered_events_; + ev.data.ptr = descriptor_data; + epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev); + } + } + + descriptor_data->op_queue_[op_type].push(op); + scheduler_.work_started(); +} + +void epoll_reactor::cancel_ops(socket_type, + epoll_reactor::per_descriptor_data& descriptor_data) +{ + if (!descriptor_data) + return; + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + { + while (reactor_op* op = descriptor_data->op_queue_[i].front()) + { + op->ec_ = asio::error::operation_aborted; + descriptor_data->op_queue_[i].pop(); + ops.push(op); + } + } + + descriptor_lock.unlock(); + + scheduler_.post_deferred_completions(ops); +} + +void epoll_reactor::deregister_descriptor(socket_type descriptor, + epoll_reactor::per_descriptor_data& descriptor_data, bool closing) +{ + if (!descriptor_data) + return; + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (!descriptor_data->shutdown_) + { + if (closing) + { + // The descriptor will be automatically removed from the epoll set when + // it is closed. + } + else if (descriptor_data->registered_events_ != 0) + { + epoll_event ev = { 0, { 0 } }; + epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); + } + + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + { + while (reactor_op* op = descriptor_data->op_queue_[i].front()) + { + op->ec_ = asio::error::operation_aborted; + descriptor_data->op_queue_[i].pop(); + ops.push(op); + } + } + + descriptor_data->descriptor_ = -1; + descriptor_data->shutdown_ = true; + + descriptor_lock.unlock(); + + ASIO_HANDLER_REACTOR_DEREGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + scheduler_.post_deferred_completions(ops); + + // Leave descriptor_data set so that it will be freed by the subsequent + // call to cleanup_descriptor_data. 
+ } + else + { + // We are shutting down, so prevent cleanup_descriptor_data from freeing + // the descriptor_data object and let the destructor free it instead. + descriptor_data = 0; + } +} + +void epoll_reactor::deregister_internal_descriptor(socket_type descriptor, + epoll_reactor::per_descriptor_data& descriptor_data) +{ + if (!descriptor_data) + return; + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (!descriptor_data->shutdown_) + { + epoll_event ev = { 0, { 0 } }; + epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); + + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + ops.push(descriptor_data->op_queue_[i]); + + descriptor_data->descriptor_ = -1; + descriptor_data->shutdown_ = true; + + descriptor_lock.unlock(); + + ASIO_HANDLER_REACTOR_DEREGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + // Leave descriptor_data set so that it will be freed by the subsequent + // call to cleanup_descriptor_data. + } + else + { + // We are shutting down, so prevent cleanup_descriptor_data from freeing + // the descriptor_data object and let the destructor free it instead. + descriptor_data = 0; + } +} + +void epoll_reactor::cleanup_descriptor_data( + per_descriptor_data& descriptor_data) +{ + if (descriptor_data) + { + free_descriptor_state(descriptor_data); + descriptor_data = 0; + } +} + +void epoll_reactor::run(long usec, op_queue<operation>& ops) +{ + // This code relies on the fact that the scheduler queues the reactor task + // behind all descriptor operations generated by this function. This means, + // that by the time we reach this point, any previously returned descriptor + // operations have already been dequeued. Therefore it is now safe for us to + // reuse and return them for the scheduler to queue again. + + // Calculate timeout. Check the timer queues only if timerfd is not in use. + int timeout; + if (usec == 0) + timeout = 0; + else + { + timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1); + if (timer_fd_ == -1) + { + mutex::scoped_lock lock(mutex_); + timeout = get_timeout(timeout); + } + } + + // Block on the epoll descriptor. + epoll_event events[128]; + int num_events = epoll_wait(epoll_fd_, events, 128, timeout); + +#if defined(ASIO_ENABLE_HANDLER_TRACKING) + // Trace the waiting events. + for (int i = 0; i < num_events; ++i) + { + void* ptr = events[i].data.ptr; + if (ptr == &interrupter_) + { + // Ignore. + } +# if defined(ASIO_HAS_TIMERFD) + else if (ptr == &timer_fd_) + { + // Ignore. + } +# endif // defined(ASIO_HAS_TIMERFD) + else + { + unsigned event_mask = 0; + if ((events[i].events & EPOLLIN) != 0) + event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT; + if ((events[i].events & EPOLLOUT)) + event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT; + if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0) + event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT; + ASIO_HANDLER_REACTOR_EVENTS((context(), + reinterpret_cast<uintmax_t>(ptr), event_mask)); + } + } +#endif // defined(ASIO_ENABLE_HANDLER_TRACKING) + +#if defined(ASIO_HAS_TIMERFD) + bool check_timers = (timer_fd_ == -1); +#else // defined(ASIO_HAS_TIMERFD) + bool check_timers = true; +#endif // defined(ASIO_HAS_TIMERFD) + + // Dispatch the waiting events. 
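// (Editorial sketch, not part of the original file: how the timerfd path
// below keeps the kernel timer in sync with the nearest asio timer.
// get_timeout() computes an itimerspec from the wait duration and
// timerfd_settime() arms the descriptor, which becomes readable -- waking
// epoll_wait() -- when the deadline passes. Simplified to the relative,
// one-shot case:
//
//   itimerspec ts;
//   ts.it_interval.tv_sec = 0;               // one-shot, no period
//   ts.it_interval.tv_nsec = 0;
//   ts.it_value.tv_sec = usec / 1000000;     // delay until next timer
//   ts.it_value.tv_nsec = (usec % 1000000) * 1000;
//   timerfd_settime(timer_fd, 0 /* relative */, &ts, 0);
// )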
+ for (int i = 0; i < num_events; ++i) + { + void* ptr = events[i].data.ptr; + if (ptr == &interrupter_) + { + // No need to reset the interrupter since we're leaving the descriptor + // in a ready-to-read state and relying on edge-triggered notifications + // to make it so that we only get woken up when the descriptor's epoll + // registration is updated. + +#if defined(ASIO_HAS_TIMERFD) + if (timer_fd_ == -1) + check_timers = true; +#else // defined(ASIO_HAS_TIMERFD) + check_timers = true; +#endif // defined(ASIO_HAS_TIMERFD) + } +#if defined(ASIO_HAS_TIMERFD) + else if (ptr == &timer_fd_) + { + check_timers = true; + } +#endif // defined(ASIO_HAS_TIMERFD) + else + { + // The descriptor operation doesn't count as work in and of itself, so we + // don't call work_started() here. This still allows the scheduler to + // stop if the only remaining operations are descriptor operations. + descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr); + if (!ops.is_enqueued(descriptor_data)) + { + descriptor_data->set_ready_events(events[i].events); + ops.push(descriptor_data); + } + else + { + descriptor_data->add_ready_events(events[i].events); + } + } + } + + if (check_timers) + { + mutex::scoped_lock common_lock(mutex_); + timer_queues_.get_ready_timers(ops); + +#if defined(ASIO_HAS_TIMERFD) + if (timer_fd_ != -1) + { + itimerspec new_timeout; + itimerspec old_timeout; + int flags = get_timeout(new_timeout); + timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); + } +#endif // defined(ASIO_HAS_TIMERFD) + } +} + +void epoll_reactor::interrupt() +{ + epoll_event ev = { 0, { 0 } }; + ev.events = EPOLLIN | EPOLLERR | EPOLLET; + ev.data.ptr = &interrupter_; + epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev); +} + +int epoll_reactor::do_epoll_create() +{ +#if defined(EPOLL_CLOEXEC) + int fd = epoll_create1(EPOLL_CLOEXEC); +#else // defined(EPOLL_CLOEXEC) + int fd = -1; + errno = EINVAL; +#endif // defined(EPOLL_CLOEXEC) + + if (fd == -1 && (errno == EINVAL || errno == ENOSYS)) + { + fd = epoll_create(epoll_size); + if (fd != -1) + ::fcntl(fd, F_SETFD, FD_CLOEXEC); + } + + if (fd == -1) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "epoll"); + } + + return fd; +} + +int epoll_reactor::do_timerfd_create() +{ +#if defined(ASIO_HAS_TIMERFD) +# if defined(TFD_CLOEXEC) + int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); +# else // defined(TFD_CLOEXEC) + int fd = -1; + errno = EINVAL; +# endif // defined(TFD_CLOEXEC) + + if (fd == -1 && errno == EINVAL) + { + fd = timerfd_create(CLOCK_MONOTONIC, 0); + if (fd != -1) + ::fcntl(fd, F_SETFD, FD_CLOEXEC); + } + + return fd; +#else // defined(ASIO_HAS_TIMERFD) + return -1; +#endif // defined(ASIO_HAS_TIMERFD) +} + +epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state() +{ + mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); + return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING( + REACTOR_IO, scheduler_.concurrency_hint())); +} + +void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s) +{ + mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); + registered_descriptors_.free(s); +} + +void epoll_reactor::do_add_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.insert(&queue); +} + +void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.erase(&queue); +} + +void 
epoll_reactor::update_timeout() +{ +#if defined(ASIO_HAS_TIMERFD) + if (timer_fd_ != -1) + { + itimerspec new_timeout; + itimerspec old_timeout; + int flags = get_timeout(new_timeout); + timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); + return; + } +#endif // defined(ASIO_HAS_TIMERFD) + interrupt(); +} + +int epoll_reactor::get_timeout(int msec) +{ + // By default we will wait no longer than 5 minutes. This will ensure that + // any changes to the system clock are detected after no longer than this. + const int max_msec = 5 * 60 * 1000; + return timer_queues_.wait_duration_msec( + (msec < 0 || max_msec < msec) ? max_msec : msec); +} + +#if defined(ASIO_HAS_TIMERFD) +int epoll_reactor::get_timeout(itimerspec& ts) +{ + ts.it_interval.tv_sec = 0; + ts.it_interval.tv_nsec = 0; + + long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); + ts.it_value.tv_sec = usec / 1000000; + ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1; + + return usec ? 0 : TFD_TIMER_ABSTIME; +} +#endif // defined(ASIO_HAS_TIMERFD) + +struct epoll_reactor::perform_io_cleanup_on_block_exit +{ + explicit perform_io_cleanup_on_block_exit(epoll_reactor* r) + : reactor_(r), first_op_(0) + { + } + + ~perform_io_cleanup_on_block_exit() + { + if (first_op_) + { + // Post the remaining completed operations for invocation. + if (!ops_.empty()) + reactor_->scheduler_.post_deferred_completions(ops_); + + // A user-initiated operation has completed, but there's no need to + // explicitly call work_finished() here. Instead, we'll take advantage of + // the fact that the scheduler will call work_finished() once we return. + } + else + { + // No user-initiated operations have completed, so we need to compensate + // for the work_finished() call that the scheduler will make once this + // operation returns. + reactor_->scheduler_.compensating_work_started(); + } + } + + epoll_reactor* reactor_; + op_queue<operation> ops_; + operation* first_op_; +}; + +epoll_reactor::descriptor_state::descriptor_state(bool locking) + : operation(&epoll_reactor::descriptor_state::do_complete), + mutex_(locking) +{ +} + +operation* epoll_reactor::descriptor_state::perform_io(uint32_t events) +{ + mutex_.lock(); + perform_io_cleanup_on_block_exit io_cleanup(reactor_); + mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock); + + // Exception operations must be processed first to ensure that any + // out-of-band data is read before normal data. + static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI }; + for (int j = max_ops - 1; j >= 0; --j) + { + if (events & (flag[j] | EPOLLERR | EPOLLHUP)) + { + try_speculative_[j] = true; + while (reactor_op* op = op_queue_[j].front()) + { + if (reactor_op::status status = op->perform()) + { + op_queue_[j].pop(); + io_cleanup.ops_.push(op); + if (status == reactor_op::done_and_exhausted) + { + try_speculative_[j] = false; + break; + } + } + else + break; + } + } + } + + // The first operation will be returned for completion now. The others will + // be posted for later by the io_cleanup object's destructor. 
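// (Editorial sketch, not part of the original file: the destructor-based
// cleanup idiom used by perform_io_cleanup_on_block_exit above. Work that
// must happen on every exit path from perform_io() -- posting leftover
// completions, or compensating the scheduler's work count when nothing
// completed -- is placed in a stack object's destructor so it runs whether
// the function returns normally or exits early:
//
//   struct on_block_exit
//   {
//     ~on_block_exit() { /* post ops_, or compensate work count */ }
//     op_queue<operation> ops_;
//   };
//   // declared at the top of the block; finalized by ~on_block_exit()
// )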
+ io_cleanup.first_op_ = io_cleanup.ops_.front(); + io_cleanup.ops_.pop(); + return io_cleanup.first_op_; +} + +void epoll_reactor::descriptor_state::do_complete( + void* owner, operation* base, + const asio::error_code& ec, std::size_t bytes_transferred) +{ + if (owner) + { + descriptor_state* descriptor_data = static_cast<descriptor_state*>(base); + uint32_t events = static_cast<uint32_t>(bytes_transferred); + if (operation* op = descriptor_data->perform_io(events)) + { + op->complete(owner, ec, 0); + } + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_EPOLL) + +#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP diff --git a/lib/asio/detail/impl/eventfd_select_interrupter.ipp b/lib/asio/detail/impl/eventfd_select_interrupter.ipp new file mode 100644 index 0000000..c56e89a --- /dev/null +++ b/lib/asio/detail/impl/eventfd_select_interrupter.ipp @@ -0,0 +1,165 @@ +// +// detail/impl/eventfd_select_interrupter.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP +#define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_EVENTFD) + +#include <sys/stat.h> +#include <sys/types.h> +#include <fcntl.h> +#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 +# include <asm/unistd.h> +#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 +# include <sys/eventfd.h> +#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 +#include "asio/detail/cstdint.hpp" +#include "asio/detail/eventfd_select_interrupter.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +eventfd_select_interrupter::eventfd_select_interrupter() +{ + open_descriptors(); +} + +void eventfd_select_interrupter::open_descriptors() +{ +#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 + write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0); + if (read_descriptor_ != -1) + { + ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); + ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); + } +#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 +# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) + write_descriptor_ = read_descriptor_ = + ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); +# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) + errno = EINVAL; + write_descriptor_ = read_descriptor_ = -1; +# endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) + if (read_descriptor_ == -1 && errno == EINVAL) + { + write_descriptor_ = read_descriptor_ = ::eventfd(0, 0); + if (read_descriptor_ != -1) + { + ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); + ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); + } + } +#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 + + if (read_descriptor_ == -1) + { + int pipe_fds[2]; + if (pipe(pipe_fds) == 0) + { + read_descriptor_ = pipe_fds[0]; + ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); + ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); + write_descriptor_ = pipe_fds[1]; + ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); + ::fcntl(write_descriptor_, 
F_SETFD, FD_CLOEXEC); + } + else + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "eventfd_select_interrupter"); + } + } +} + +eventfd_select_interrupter::~eventfd_select_interrupter() +{ + close_descriptors(); +} + +void eventfd_select_interrupter::close_descriptors() +{ + if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_) + ::close(write_descriptor_); + if (read_descriptor_ != -1) + ::close(read_descriptor_); +} + +void eventfd_select_interrupter::recreate() +{ + close_descriptors(); + + write_descriptor_ = -1; + read_descriptor_ = -1; + + open_descriptors(); +} + +void eventfd_select_interrupter::interrupt() +{ + uint64_t counter(1UL); + int result = ::write(write_descriptor_, &counter, sizeof(uint64_t)); + (void)result; +} + +bool eventfd_select_interrupter::reset() +{ + if (write_descriptor_ == read_descriptor_) + { + for (;;) + { + // Only perform one read. The kernel maintains an atomic counter. + uint64_t counter(0); + errno = 0; + int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t)); + if (bytes_read < 0 && errno == EINTR) + continue; + bool was_interrupted = (bytes_read > 0); + return was_interrupted; + } + } + else + { + for (;;) + { + // Clear all data from the pipe. + char data[1024]; + int bytes_read = ::read(read_descriptor_, data, sizeof(data)); + if (bytes_read < 0 && errno == EINTR) + continue; + bool was_interrupted = (bytes_read > 0); + while (bytes_read == sizeof(data)) + bytes_read = ::read(read_descriptor_, data, sizeof(data)); + return was_interrupted; + } + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_EVENTFD) + +#endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP diff --git a/lib/asio/detail/impl/handler_tracking.ipp b/lib/asio/detail/impl/handler_tracking.ipp new file mode 100644 index 0000000..5a4ff6f --- /dev/null +++ b/lib/asio/detail/impl/handler_tracking.ipp @@ -0,0 +1,358 @@ +// +// detail/impl/handler_tracking.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP +#define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_CUSTOM_HANDLER_TRACKING) + +// The handler tracking implementation is provided by the user-specified header. 
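// (Editorial sketch, not part of the original file: the trace format
// emitted by the built-in ASIO_ENABLE_HANDLER_TRACKING branch that follows.
// Records are '|'-separated: a literal @asio tag, a <seconds>.<microseconds>
// timestamp, an event field, then optional details. Illustrative lines with
// made-up values:
//
//   @asio|1534567890.123456|0*1|socket@0x7f1c.async_receive  // handler 1 created by 0
//   @asio|1534567890.123789|>1|ec=system:0,bytes_transferred=5  // invocation begins
//   @asio|1534567890.123912|<1|                               // invocation ends
//   @asio|1534567890.124001|~2|                               // destroyed, never invoked
// )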
+ +#elif defined(ASIO_ENABLE_HANDLER_TRACKING) + +#include <cstdarg> +#include <cstdio> +#include "asio/detail/handler_tracking.hpp" + +#if defined(ASIO_HAS_BOOST_DATE_TIME) +# include "asio/time_traits.hpp" +#elif defined(ASIO_HAS_CHRONO) +# include "asio/detail/chrono.hpp" +# include "asio/detail/chrono_time_traits.hpp" +# include "asio/wait_traits.hpp" +#endif // defined(ASIO_HAS_BOOST_DATE_TIME) + +#if defined(ASIO_WINDOWS_RUNTIME) +# include "asio/detail/socket_types.hpp" +#elif !defined(ASIO_WINDOWS) +# include <unistd.h> +#endif // !defined(ASIO_WINDOWS) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +struct handler_tracking_timestamp +{ + uint64_t seconds; + uint64_t microseconds; + + handler_tracking_timestamp() + { +#if defined(ASIO_HAS_BOOST_DATE_TIME) + boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); + boost::posix_time::time_duration now = + boost::posix_time::microsec_clock::universal_time() - epoch; +#elif defined(ASIO_HAS_CHRONO) + typedef chrono_time_traits<chrono::system_clock, + asio::wait_traits<chrono::system_clock> > traits_helper; + traits_helper::posix_time_duration now( + chrono::system_clock::now().time_since_epoch()); +#endif + seconds = static_cast<uint64_t>(now.total_seconds()); + microseconds = static_cast<uint64_t>(now.total_microseconds() % 1000000); + } +}; + +struct handler_tracking::tracking_state +{ + static_mutex mutex_; + uint64_t next_id_; + tss_ptr<completion>* current_completion_; +}; + +handler_tracking::tracking_state* handler_tracking::get_state() +{ + static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0 }; + return &state; +} + +void handler_tracking::init() +{ + static tracking_state* state = get_state(); + + state->mutex_.init(); + + static_mutex::scoped_lock lock(state->mutex_); + if (state->current_completion_ == 0) + state->current_completion_ = new tss_ptr<completion>; +} + +void handler_tracking::creation(execution_context&, + handler_tracking::tracked_handler& h, + const char* object_type, void* object, + uintmax_t /*native_handle*/, const char* op_name) +{ + static tracking_state* state = get_state(); + + static_mutex::scoped_lock lock(state->mutex_); + h.id_ = state->next_id_++; + lock.unlock(); + + handler_tracking_timestamp timestamp; + + uint64_t current_id = 0; + if (completion* current_completion = *state->current_completion_) + current_id = current_completion->id_; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + current_id, h.id_, object_type, object, op_name); +} + +handler_tracking::completion::completion( + const handler_tracking::tracked_handler& h) + : id_(h.id_), + invoked_(false), + next_(*get_state()->current_completion_) +{ + *get_state()->current_completion_ = this; +} + +handler_tracking::completion::~completion() +{ + if (id_) + { + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|%c%I64u|\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|%c%llu|\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + invoked_ ? '!' 
: '~', id_); + } + + *get_state()->current_completion_ = next_; +} + +void handler_tracking::completion::invocation_begin() +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|>%I64u|\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|>%llu|\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, id_); + + invoked_ = true; +} + +void handler_tracking::completion::invocation_begin( + const asio::error_code& ec) +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + id_, ec.category().name(), ec.value()); + + invoked_ = true; +} + +void handler_tracking::completion::invocation_begin( + const asio::error_code& ec, std::size_t bytes_transferred) +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + id_, ec.category().name(), ec.value(), + static_cast<uint64_t>(bytes_transferred)); + + invoked_ = true; +} + +void handler_tracking::completion::invocation_begin( + const asio::error_code& ec, int signal_number) +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + id_, ec.category().name(), ec.value(), signal_number); + + invoked_ = true; +} + +void handler_tracking::completion::invocation_begin( + const asio::error_code& ec, const char* arg) +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + id_, ec.category().name(), ec.value(), arg); + + invoked_ = true; +} + +void handler_tracking::completion::invocation_end() +{ + if (id_) + { + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|<%I64u|\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|<%llu|\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, id_); + + id_ = 0; + } +} + +void handler_tracking::operation(execution_context&, + const char* object_type, void* object, + uintmax_t /*native_handle*/, const char* op_name) +{ + static tracking_state* state = get_state(); + + handler_tracking_timestamp timestamp; + + unsigned long long current_id = 0; + if (completion* current_completion = *state->current_completion_) + current_id = current_completion->id_; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + current_id, object_type, object, op_name); +} + +void handler_tracking::reactor_registration(execution_context& /*context*/, + uintmax_t /*native_handle*/, uintmax_t 
/*registration*/) +{ +} + +void handler_tracking::reactor_deregistration(execution_context& /*context*/, + uintmax_t /*native_handle*/, uintmax_t /*registration*/) +{ +} + +void handler_tracking::reactor_events(execution_context& /*context*/, + uintmax_t /*native_handle*/, unsigned /*events*/) +{ +} + +void handler_tracking::reactor_operation( + const tracked_handler& h, const char* op_name, + const asio::error_code& ec) +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + h.id_, op_name, ec.category().name(), ec.value()); +} + +void handler_tracking::reactor_operation( + const tracked_handler& h, const char* op_name, + const asio::error_code& ec, std::size_t bytes_transferred) +{ + handler_tracking_timestamp timestamp; + + write_line( +#if defined(ASIO_WINDOWS) + "@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\n", +#else // defined(ASIO_WINDOWS) + "@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\n", +#endif // defined(ASIO_WINDOWS) + timestamp.seconds, timestamp.microseconds, + h.id_, op_name, ec.category().name(), ec.value(), + static_cast<uint64_t>(bytes_transferred)); +} + +void handler_tracking::write_line(const char* format, ...) +{ + using namespace std; // For sprintf (or equivalent). + + va_list args; + va_start(args, format); + + char line[256] = ""; +#if defined(ASIO_HAS_SECURE_RTL) + int length = vsprintf_s(line, sizeof(line), format, args); +#else // defined(ASIO_HAS_SECURE_RTL) + int length = vsprintf(line, format, args); +#endif // defined(ASIO_HAS_SECURE_RTL) + + va_end(args); + +#if defined(ASIO_WINDOWS_RUNTIME) + wchar_t wline[256] = L""; + mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length); + ::OutputDebugStringW(wline); +#elif defined(ASIO_WINDOWS) + HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE); + DWORD bytes_written = 0; + ::WriteFile(stderr_handle, line, length, &bytes_written, 0); +#else // defined(ASIO_WINDOWS) + ::write(STDERR_FILENO, line, length); +#endif // defined(ASIO_WINDOWS) +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_ENABLE_HANDLER_TRACKING) + +#endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP diff --git a/lib/asio/detail/impl/kqueue_reactor.hpp b/lib/asio/detail/impl/kqueue_reactor.hpp new file mode 100644 index 0000000..136d167 --- /dev/null +++ b/lib/asio/detail/impl/kqueue_reactor.hpp @@ -0,0 +1,93 @@ +// +// detail/impl/kqueue_reactor.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP +#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_KQUEUE) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Time_Traits> +void kqueue_reactor::add_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_add_timer_queue(queue); +} + +// Remove a timer queue from the reactor. +template <typename Time_Traits> +void kqueue_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_remove_timer_queue(queue); +} + +template <typename Time_Traits> +void kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue, + const typename Time_Traits::time_type& time, + typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op) +{ + mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + scheduler_.post_immediate_completion(op, false); + return; + } + + bool earliest = queue.enqueue_timer(time, timer, op); + scheduler_.work_started(); + if (earliest) + interrupt(); +} + +template <typename Time_Traits> +std::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& timer, + std::size_t max_cancelled) +{ + mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); + lock.unlock(); + scheduler_.post_deferred_completions(ops); + return n; +} + +template <typename Time_Traits> +void kqueue_reactor::move_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& target, + typename timer_queue<Time_Traits>::per_timer_data& source) +{ + mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + queue.cancel_timer(target, ops); + queue.move_timer(target, source); + lock.unlock(); + scheduler_.post_deferred_completions(ops); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_KQUEUE) + +#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP diff --git a/lib/asio/detail/impl/kqueue_reactor.ipp b/lib/asio/detail/impl/kqueue_reactor.ipp new file mode 100644 index 0000000..73986e0 --- /dev/null +++ b/lib/asio/detail/impl/kqueue_reactor.ipp @@ -0,0 +1,566 @@ +// +// detail/impl/kqueue_reactor.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP +#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_KQUEUE) + +#include "asio/detail/kqueue_reactor.hpp" +#include "asio/detail/scheduler.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +#if defined(__NetBSD__) +# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ + EV_SET(ev, ident, filt, flags, fflags, data, \ + reinterpret_cast<intptr_t>(static_cast<void*>(udata))) +#else +# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ + EV_SET(ev, ident, filt, flags, fflags, data, udata) +#endif + +namespace asio { +namespace detail { + +kqueue_reactor::kqueue_reactor(asio::execution_context& ctx) + : execution_context_service_base<kqueue_reactor>(ctx), + scheduler_(use_service<scheduler>(ctx)), + mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING( + REACTOR_REGISTRATION, scheduler_.concurrency_hint())), + kqueue_fd_(do_kqueue_create()), + interrupter_(), + shutdown_(false), + registered_descriptors_mutex_(mutex_.enabled()) +{ + struct kevent events[1]; + ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), + EVFILT_READ, EV_ADD, 0, 0, &interrupter_); + if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) + { + asio::error_code error(errno, + asio::error::get_system_category()); + asio::detail::throw_error(error); + } +} + +kqueue_reactor::~kqueue_reactor() +{ + close(kqueue_fd_); +} + +void kqueue_reactor::shutdown() +{ + mutex::scoped_lock lock(mutex_); + shutdown_ = true; + lock.unlock(); + + op_queue<operation> ops; + + while (descriptor_state* state = registered_descriptors_.first()) + { + for (int i = 0; i < max_ops; ++i) + ops.push(state->op_queue_[i]); + state->shutdown_ = true; + registered_descriptors_.free(state); + } + + timer_queues_.get_all_timers(ops); + + scheduler_.abandon_operations(ops); +} + +void kqueue_reactor::notify_fork( + asio::execution_context::fork_event fork_ev) +{ + if (fork_ev == asio::execution_context::fork_child) + { + // The kqueue descriptor is automatically closed in the child. + kqueue_fd_ = -1; + kqueue_fd_ = do_kqueue_create(); + + interrupter_.recreate(); + + struct kevent events[2]; + ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), + EVFILT_READ, EV_ADD, 0, 0, &interrupter_); + if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "kqueue interrupter registration"); + } + + // Re-register all descriptors with kqueue. 
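+ // (A kqueue descriptor is not inherited across fork(), so the replacement
+ // kqueue created above starts out empty and every descriptor that was
+ // registered in the parent must be added to it again.)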
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); + for (descriptor_state* state = registered_descriptors_.first(); + state != 0; state = state->next_) + { + if (state->num_kevents_ > 0) + { + ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_, + EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state); + ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_, + EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state); + if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "kqueue re-registration"); + } + } + } + } +} + +void kqueue_reactor::init_task() +{ + scheduler_.init_task(); +} + +int kqueue_reactor::register_descriptor(socket_type descriptor, + kqueue_reactor::per_descriptor_data& descriptor_data) +{ + descriptor_data = allocate_descriptor_state(); + + ASIO_HANDLER_REACTOR_REGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + mutex::scoped_lock lock(descriptor_data->mutex_); + + descriptor_data->descriptor_ = descriptor; + descriptor_data->num_kevents_ = 0; + descriptor_data->shutdown_ = false; + + return 0; +} + +int kqueue_reactor::register_internal_descriptor( + int op_type, socket_type descriptor, + kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op) +{ + descriptor_data = allocate_descriptor_state(); + + ASIO_HANDLER_REACTOR_REGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + mutex::scoped_lock lock(descriptor_data->mutex_); + + descriptor_data->descriptor_ = descriptor; + descriptor_data->num_kevents_ = 1; + descriptor_data->shutdown_ = false; + descriptor_data->op_queue_[op_type].push(op); + + struct kevent events[1]; + ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, + EV_ADD | EV_CLEAR, 0, 0, descriptor_data); + if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) + return errno; + + return 0; +} + +void kqueue_reactor::move_descriptor(socket_type, + kqueue_reactor::per_descriptor_data& target_descriptor_data, + kqueue_reactor::per_descriptor_data& source_descriptor_data) +{ + target_descriptor_data = source_descriptor_data; + source_descriptor_data = 0; +} + +void kqueue_reactor::start_op(int op_type, socket_type descriptor, + kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op, + bool is_continuation, bool allow_speculative) +{ + if (!descriptor_data) + { + op->ec_ = asio::error::bad_descriptor; + post_immediate_completion(op, is_continuation); + return; + } + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (descriptor_data->shutdown_) + { + post_immediate_completion(op, is_continuation); + return; + } + + if (descriptor_data->op_queue_[op_type].empty()) + { + static const int num_kevents[max_ops] = { 1, 2, 1 }; + + if (allow_speculative + && (op_type != read_op + || descriptor_data->op_queue_[except_op].empty())) + { + if (op->perform()) + { + descriptor_lock.unlock(); + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + + if (descriptor_data->num_kevents_ < num_kevents[op_type]) + { + struct kevent events[2]; + ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, + EV_ADD | EV_CLEAR, 0, 0, descriptor_data); + ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, + EV_ADD | EV_CLEAR, 0, 0, descriptor_data); + if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1) + { + descriptor_data->num_kevents_ = num_kevents[op_type]; + } 
+ else + { + op->ec_ = asio::error_code(errno, + asio::error::get_system_category()); + scheduler_.post_immediate_completion(op, is_continuation); + return; + } + } + } + else + { + if (descriptor_data->num_kevents_ < num_kevents[op_type]) + descriptor_data->num_kevents_ = num_kevents[op_type]; + + struct kevent events[2]; + ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, + EV_ADD | EV_CLEAR, 0, 0, descriptor_data); + ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, + EV_ADD | EV_CLEAR, 0, 0, descriptor_data); + ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); + } + } + + descriptor_data->op_queue_[op_type].push(op); + scheduler_.work_started(); +} + +void kqueue_reactor::cancel_ops(socket_type, + kqueue_reactor::per_descriptor_data& descriptor_data) +{ + if (!descriptor_data) + return; + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + { + while (reactor_op* op = descriptor_data->op_queue_[i].front()) + { + op->ec_ = asio::error::operation_aborted; + descriptor_data->op_queue_[i].pop(); + ops.push(op); + } + } + + descriptor_lock.unlock(); + + scheduler_.post_deferred_completions(ops); +} + +void kqueue_reactor::deregister_descriptor(socket_type descriptor, + kqueue_reactor::per_descriptor_data& descriptor_data, bool closing) +{ + if (!descriptor_data) + return; + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (!descriptor_data->shutdown_) + { + if (closing) + { + // The descriptor will be automatically removed from the kqueue when it + // is closed. + } + else + { + struct kevent events[2]; + ASIO_KQUEUE_EV_SET(&events[0], descriptor, + EVFILT_READ, EV_DELETE, 0, 0, 0); + ASIO_KQUEUE_EV_SET(&events[1], descriptor, + EVFILT_WRITE, EV_DELETE, 0, 0, 0); + ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); + } + + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + { + while (reactor_op* op = descriptor_data->op_queue_[i].front()) + { + op->ec_ = asio::error::operation_aborted; + descriptor_data->op_queue_[i].pop(); + ops.push(op); + } + } + + descriptor_data->descriptor_ = -1; + descriptor_data->shutdown_ = true; + + descriptor_lock.unlock(); + + ASIO_HANDLER_REACTOR_DEREGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + scheduler_.post_deferred_completions(ops); + + // Leave descriptor_data set so that it will be freed by the subsequent + // call to cleanup_descriptor_data. + } + else + { + // We are shutting down, so prevent cleanup_descriptor_data from freeing + // the descriptor_data object and let the destructor free it instead. 
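+ // (shutdown() has already returned the state object to the
+ // registered_descriptors_ pool; see the loop in kqueue_reactor::shutdown()
+ // above.)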
+ descriptor_data = 0; + } +} + +void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor, + kqueue_reactor::per_descriptor_data& descriptor_data) +{ + if (!descriptor_data) + return; + + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (!descriptor_data->shutdown_) + { + struct kevent events[2]; + ASIO_KQUEUE_EV_SET(&events[0], descriptor, + EVFILT_READ, EV_DELETE, 0, 0, 0); + ASIO_KQUEUE_EV_SET(&events[1], descriptor, + EVFILT_WRITE, EV_DELETE, 0, 0, 0); + ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); + + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + ops.push(descriptor_data->op_queue_[i]); + + descriptor_data->descriptor_ = -1; + descriptor_data->shutdown_ = true; + + descriptor_lock.unlock(); + + ASIO_HANDLER_REACTOR_DEREGISTRATION(( + context(), static_cast<uintmax_t>(descriptor), + reinterpret_cast<uintmax_t>(descriptor_data))); + + // Leave descriptor_data set so that it will be freed by the subsequent + // call to cleanup_descriptor_data. + } + else + { + // We are shutting down, so prevent cleanup_descriptor_data from freeing + // the descriptor_data object and let the destructor free it instead. + descriptor_data = 0; + } +} + +void kqueue_reactor::cleanup_descriptor_data( + per_descriptor_data& descriptor_data) +{ + if (descriptor_data) + { + free_descriptor_state(descriptor_data); + descriptor_data = 0; + } +} + +void kqueue_reactor::run(long usec, op_queue<operation>& ops) +{ + mutex::scoped_lock lock(mutex_); + + // Determine how long to block while waiting for events. + timespec timeout_buf = { 0, 0 }; + timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf; + + lock.unlock(); + + // Block on the kqueue descriptor. + struct kevent events[128]; + int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout); + +#if defined(ASIO_ENABLE_HANDLER_TRACKING) + // Trace the waiting events. + for (int i = 0; i < num_events; ++i) + { + void* ptr = reinterpret_cast<void*>(events[i].udata); + if (ptr != &interrupter_) + { + unsigned event_mask = 0; + switch (events[i].filter) + { + case EVFILT_READ: + event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT; + break; + case EVFILT_WRITE: + event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT; + break; + } + if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0) + event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT; + ASIO_HANDLER_REACTOR_EVENTS((context(), + reinterpret_cast<uintmax_t>(ptr), event_mask)); + } + } +#endif // defined(ASIO_ENABLE_HANDLER_TRACKING) + + // Dispatch the waiting events. + for (int i = 0; i < num_events; ++i) + { + void* ptr = reinterpret_cast<void*>(events[i].udata); + if (ptr == &interrupter_) + { + interrupter_.reset(); + } + else + { + descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr); + mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); + + if (events[i].filter == EVFILT_WRITE + && descriptor_data->num_kevents_ == 2 + && descriptor_data->op_queue_[write_op].empty()) + { + // Some descriptor types, like serial ports, don't seem to support + // EV_CLEAR with EVFILT_WRITE. Since we have no pending write + // operations we'll remove the EVFILT_WRITE registration here so that + // we don't end up in a tight spin. 
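+ // (The registration is restored lazily: the next queued write operation
+ // makes start_op() observe num_kevents_ < 2 and re-add EVFILT_WRITE.)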
+ struct kevent delete_events[1]; + ASIO_KQUEUE_EV_SET(&delete_events[0], + descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0); + ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0); + descriptor_data->num_kevents_ = 1; + } + + // Exception operations must be processed first to ensure that any + // out-of-band data is read before normal data. +#if defined(__NetBSD__) + static const unsigned int filter[max_ops] = +#else + static const int filter[max_ops] = +#endif + { EVFILT_READ, EVFILT_WRITE, EVFILT_READ }; + for (int j = max_ops - 1; j >= 0; --j) + { + if (events[i].filter == filter[j]) + { + if (j != except_op || events[i].flags & EV_OOBAND) + { + while (reactor_op* op = descriptor_data->op_queue_[j].front()) + { + if (events[i].flags & EV_ERROR) + { + op->ec_ = asio::error_code( + static_cast<int>(events[i].data), + asio::error::get_system_category()); + descriptor_data->op_queue_[j].pop(); + ops.push(op); + } + if (op->perform()) + { + descriptor_data->op_queue_[j].pop(); + ops.push(op); + } + else + break; + } + } + } + } + } + } + + lock.lock(); + timer_queues_.get_ready_timers(ops); +} + +void kqueue_reactor::interrupt() +{ + interrupter_.interrupt(); +} + +int kqueue_reactor::do_kqueue_create() +{ + int fd = ::kqueue(); + if (fd == -1) + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "kqueue"); + } + return fd; +} + +kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state() +{ + mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); + return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING( + REACTOR_IO, scheduler_.concurrency_hint())); +} + +void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s) +{ + mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); + registered_descriptors_.free(s); +} + +void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.insert(&queue); +} + +void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.erase(&queue); +} + +timespec* kqueue_reactor::get_timeout(long usec, timespec& ts) +{ + // By default we will wait no longer than 5 minutes. This will ensure that + // any changes to the system clock are detected after no longer than this. + const long max_usec = 5 * 60 * 1000 * 1000; + usec = timer_queues_.wait_duration_usec( + (usec < 0 || max_usec < usec) ? max_usec : usec); + ts.tv_sec = usec / 1000000; + ts.tv_nsec = (usec % 1000000) * 1000; + return &ts; +} + +} // namespace detail +} // namespace asio + +#undef ASIO_KQUEUE_EV_SET + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_KQUEUE) + +#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP diff --git a/lib/asio/detail/impl/null_event.ipp b/lib/asio/detail/impl/null_event.ipp new file mode 100644 index 0000000..22ade40 --- /dev/null +++ b/lib/asio/detail/impl/null_event.ipp @@ -0,0 +1,74 @@ +// +// detail/impl/null_event.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_NULL_EVENT_IPP +#define ASIO_DETAIL_IMPL_NULL_EVENT_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS_RUNTIME) +# include <thread> +#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) +# include "asio/detail/socket_types.hpp" +#else +# include <unistd.h> +# if defined(__hpux) +# include <sys/time.h> +# endif +# if !defined(__hpux) || defined(__SELECT) +# include <sys/select.h> +# endif +#endif + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +void null_event::do_wait() +{ +#if defined(ASIO_WINDOWS_RUNTIME) + std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)()); +#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ::Sleep(INFINITE); +#else + ::pause(); +#endif +} + +void null_event::do_wait_for_usec(long usec) +{ +#if defined(ASIO_WINDOWS_RUNTIME) + std::this_thread::sleep_for(std::chrono::microseconds(usec)); +#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ::Sleep(usec / 1000); +#elif defined(__hpux) && defined(__SELECT) + timespec ts; + ts.tv_sec = usec / 1000000; + ts.tv_nsec = (usec % 1000000) * 1000; + ::pselect(0, 0, 0, 0, &ts, 0); +#else + timeval tv; + tv.tv_sec = usec / 1000000; + tv.tv_usec = usec % 1000000; + ::select(0, 0, 0, 0, &tv); +#endif +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_NULL_EVENT_IPP diff --git a/lib/asio/detail/impl/pipe_select_interrupter.ipp b/lib/asio/detail/impl/pipe_select_interrupter.ipp new file mode 100644 index 0000000..13931ab --- /dev/null +++ b/lib/asio/detail/impl/pipe_select_interrupter.ipp @@ -0,0 +1,124 @@ +// +// detail/impl/pipe_select_interrupter.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP +#define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if !defined(ASIO_WINDOWS_RUNTIME) +#if !defined(ASIO_WINDOWS) +#if !defined(__CYGWIN__) +#if !defined(__SYMBIAN32__) +#if !defined(ASIO_HAS_EVENTFD) + +#include <fcntl.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include "asio/detail/pipe_select_interrupter.hpp" +#include "asio/detail/socket_types.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +pipe_select_interrupter::pipe_select_interrupter() +{ + open_descriptors(); +} + +void pipe_select_interrupter::open_descriptors() +{ + int pipe_fds[2]; + if (pipe(pipe_fds) == 0) + { + read_descriptor_ = pipe_fds[0]; + ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); + write_descriptor_ = pipe_fds[1]; + ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); + +#if defined(FD_CLOEXEC) + ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); + ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC); +#endif // defined(FD_CLOEXEC) + } + else + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "pipe_select_interrupter"); + } +} + +pipe_select_interrupter::~pipe_select_interrupter() +{ + close_descriptors(); +} + +void pipe_select_interrupter::close_descriptors() +{ + if (read_descriptor_ != -1) + ::close(read_descriptor_); + if (write_descriptor_ != -1) + ::close(write_descriptor_); +} + +void pipe_select_interrupter::recreate() +{ + close_descriptors(); + + write_descriptor_ = -1; + read_descriptor_ = -1; + + open_descriptors(); +} + +void pipe_select_interrupter::interrupt() +{ + char byte = 0; + signed_size_type result = ::write(write_descriptor_, &byte, 1); + (void)result; +} + +bool pipe_select_interrupter::reset() +{ + for (;;) + { + char data[1024]; + signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data)); + if (bytes_read < 0 && errno == EINTR) + continue; + bool was_interrupted = (bytes_read > 0); + while (bytes_read == sizeof(data)) + bytes_read = ::read(read_descriptor_, data, sizeof(data)); + return was_interrupted; + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // !defined(ASIO_HAS_EVENTFD) +#endif // !defined(__SYMBIAN32__) +#endif // !defined(__CYGWIN__) +#endif // !defined(ASIO_WINDOWS) +#endif // !defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP diff --git a/lib/asio/detail/impl/posix_event.ipp b/lib/asio/detail/impl/posix_event.ipp new file mode 100644 index 0000000..4b46ab0 --- /dev/null +++ b/lib/asio/detail/impl/posix_event.ipp @@ -0,0 +1,59 @@ +// +// detail/impl/posix_event.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP +#define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_PTHREADS) + +#include "asio/detail/posix_event.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +posix_event::posix_event() + : state_(0) +{ +#if (defined(__MACH__) && defined(__APPLE__)) \ + || (defined(__ANDROID__) && (__ANDROID_API__ < 21)) + int error = ::pthread_cond_init(&cond_, 0); +#else // (defined(__MACH__) && defined(__APPLE__)) + // || (defined(__ANDROID__) && (__ANDROID_API__ < 21)) + ::pthread_condattr_t attr; + ::pthread_condattr_init(&attr); + int error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); + if (error == 0) + error = ::pthread_cond_init(&cond_, &attr); +#endif // (defined(__MACH__) && defined(__APPLE__)) + // || (defined(__ANDROID__) && (__ANDROID_API__ < 21)) + + asio::error_code ec(error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "event"); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_PTHREADS) + +#endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP diff --git a/lib/asio/detail/impl/posix_mutex.ipp b/lib/asio/detail/impl/posix_mutex.ipp new file mode 100644 index 0000000..27272b5 --- /dev/null +++ b/lib/asio/detail/impl/posix_mutex.ipp @@ -0,0 +1,46 @@ +// +// detail/impl/posix_mutex.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP +#define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_PTHREADS) + +#include "asio/detail/posix_mutex.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +posix_mutex::posix_mutex() +{ + int error = ::pthread_mutex_init(&mutex_, 0); + asio::error_code ec(error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "mutex"); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_PTHREADS) + +#endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP diff --git a/lib/asio/detail/impl/posix_thread.ipp b/lib/asio/detail/impl/posix_thread.ipp new file mode 100644 index 0000000..69bd16f --- /dev/null +++ b/lib/asio/detail/impl/posix_thread.ipp @@ -0,0 +1,84 @@ +// +// detail/impl/posix_thread.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP +#define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_PTHREADS) + +#include "asio/detail/posix_thread.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +posix_thread::~posix_thread() +{ + if (!joined_) + ::pthread_detach(thread_); +} + +void posix_thread::join() +{ + if (!joined_) + { + ::pthread_join(thread_, 0); + joined_ = true; + } +} + +std::size_t posix_thread::hardware_concurrency() +{ +#if defined(_SC_NPROCESSORS_ONLN) + long result = sysconf(_SC_NPROCESSORS_ONLN); + if (result > 0) + return result; +#endif // defined(_SC_NPROCESSORS_ONLN) + return 0; +} + +void posix_thread::start_thread(func_base* arg) +{ + int error = ::pthread_create(&thread_, 0, + asio_detail_posix_thread_function, arg); + if (error != 0) + { + delete arg; + asio::error_code ec(error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "thread"); + } +} + +void* asio_detail_posix_thread_function(void* arg) +{ + posix_thread::auto_func_base_ptr func = { + static_cast<posix_thread::func_base*>(arg) }; + func.ptr->run(); + return 0; +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_PTHREADS) + +#endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP diff --git a/lib/asio/detail/impl/posix_tss_ptr.ipp b/lib/asio/detail/impl/posix_tss_ptr.ipp new file mode 100644 index 0000000..54b58bd --- /dev/null +++ b/lib/asio/detail/impl/posix_tss_ptr.ipp @@ -0,0 +1,46 @@ +// +// detail/impl/posix_tss_ptr.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP +#define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_PTHREADS) + +#include "asio/detail/posix_tss_ptr.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +void posix_tss_ptr_create(pthread_key_t& key) +{ + int error = ::pthread_key_create(&key, 0); + asio::error_code ec(error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "tss"); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_PTHREADS) + +#endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP diff --git a/lib/asio/detail/impl/reactive_descriptor_service.ipp b/lib/asio/detail/impl/reactive_descriptor_service.ipp new file mode 100644 index 0000000..f9505ca --- /dev/null +++ b/lib/asio/detail/impl/reactive_descriptor_service.ipp @@ -0,0 +1,222 @@ +// +// detail/impl/reactive_descriptor_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP +#define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + +#include "asio/error.hpp" +#include "asio/detail/reactive_descriptor_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +reactive_descriptor_service::reactive_descriptor_service( + asio::io_context& io_context) + : service_base<reactive_descriptor_service>(io_context), + reactor_(asio::use_service<reactor>(io_context)) +{ + reactor_.init_task(); +} + +void reactive_descriptor_service::shutdown() +{ +} + +void reactive_descriptor_service::construct( + reactive_descriptor_service::implementation_type& impl) +{ + impl.descriptor_ = -1; + impl.state_ = 0; +} + +void reactive_descriptor_service::move_construct( + reactive_descriptor_service::implementation_type& impl, + reactive_descriptor_service::implementation_type& other_impl) +{ + impl.descriptor_ = other_impl.descriptor_; + other_impl.descriptor_ = -1; + + impl.state_ = other_impl.state_; + other_impl.state_ = 0; + + reactor_.move_descriptor(impl.descriptor_, + impl.reactor_data_, other_impl.reactor_data_); +} + +void reactive_descriptor_service::move_assign( + reactive_descriptor_service::implementation_type& impl, + reactive_descriptor_service& other_service, + reactive_descriptor_service::implementation_type& other_impl) +{ + destroy(impl); + + impl.descriptor_ = other_impl.descriptor_; + other_impl.descriptor_ = -1; + + impl.state_ = other_impl.state_; + other_impl.state_ = 0; + + other_service.reactor_.move_descriptor(impl.descriptor_, + impl.reactor_data_, other_impl.reactor_data_); +} + +void reactive_descriptor_service::destroy( + reactive_descriptor_service::implementation_type& impl) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((reactor_.context(), + "descriptor", &impl, impl.descriptor_, "close")); + + reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, + (impl.state_ & descriptor_ops::possible_dup) == 0); + + asio::error_code ignored_ec; + descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec); + + reactor_.cleanup_descriptor_data(impl.reactor_data_); + } +} + +asio::error_code reactive_descriptor_service::assign( + reactive_descriptor_service::implementation_type& impl, + const native_handle_type& native_descriptor, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + if (int err = reactor_.register_descriptor( + native_descriptor, impl.reactor_data_)) + { + ec = asio::error_code(err, + asio::error::get_system_category()); + return ec; + } + + impl.descriptor_ = native_descriptor; + impl.state_ = descriptor_ops::possible_dup; + ec = asio::error_code(); + return ec; +} + +asio::error_code reactive_descriptor_service::close( + reactive_descriptor_service::implementation_type& impl, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((reactor_.context(), + "descriptor", &impl, impl.descriptor_, "close")); + + reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, + (impl.state_ & descriptor_ops::possible_dup) == 0); + + descriptor_ops::close(impl.descriptor_, impl.state_, ec); + + 
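+ // Return the per-descriptor reactor state to its pool; no operation can
+ // reference it once deregistration has completed.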
reactor_.cleanup_descriptor_data(impl.reactor_data_); + } + else + { + ec = asio::error_code(); + } + + // The descriptor is closed by the OS even if close() returns an error. + // + // (Actually, POSIX says the state of the descriptor is unspecified. On + // Linux the descriptor is apparently closed anyway; e.g. see + // http://lkml.org/lkml/2005/9/10/129 + // We'll just have to assume that other OSes follow the same behaviour.) + construct(impl); + + return ec; +} + +reactive_descriptor_service::native_handle_type +reactive_descriptor_service::release( + reactive_descriptor_service::implementation_type& impl) +{ + native_handle_type descriptor = impl.descriptor_; + + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((reactor_.context(), + "descriptor", &impl, impl.descriptor_, "release")); + + reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false); + reactor_.cleanup_descriptor_data(impl.reactor_data_); + construct(impl); + } + + return descriptor; +} + +asio::error_code reactive_descriptor_service::cancel( + reactive_descriptor_service::implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return ec; + } + + ASIO_HANDLER_OPERATION((reactor_.context(), + "descriptor", &impl, impl.descriptor_, "cancel")); + + reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_); + ec = asio::error_code(); + return ec; +} + +void reactive_descriptor_service::start_op( + reactive_descriptor_service::implementation_type& impl, + int op_type, reactor_op* op, bool is_continuation, + bool is_non_blocking, bool noop) +{ + if (!noop) + { + if ((impl.state_ & descriptor_ops::non_blocking) || + descriptor_ops::set_internal_non_blocking( + impl.descriptor_, impl.state_, true, op->ec_)) + { + reactor_.start_op(op_type, impl.descriptor_, + impl.reactor_data_, op, is_continuation, is_non_blocking); + return; + } + } + + reactor_.post_immediate_completion(op, is_continuation); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + +#endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP diff --git a/lib/asio/detail/impl/reactive_serial_port_service.ipp b/lib/asio/detail/impl/reactive_serial_port_service.ipp new file mode 100644 index 0000000..c907835 --- /dev/null +++ b/lib/asio/detail/impl/reactive_serial_port_service.ipp @@ -0,0 +1,152 @@ +// +// detail/impl/reactive_serial_port_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP +#define ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_SERIAL_PORT) +#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) + +#include <cstring> +#include "asio/detail/reactive_serial_port_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +reactive_serial_port_service::reactive_serial_port_service( + asio::io_context& io_context) + : service_base<reactive_serial_port_service>(io_context), + descriptor_service_(io_context) +{ +} + +void reactive_serial_port_service::shutdown() +{ + descriptor_service_.shutdown(); +} + +asio::error_code reactive_serial_port_service::open( + reactive_serial_port_service::implementation_type& impl, + const std::string& device, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + descriptor_ops::state_type state = 0; + int fd = descriptor_ops::open(device.c_str(), + O_RDWR | O_NONBLOCK | O_NOCTTY, ec); + if (fd < 0) + return ec; + + int s = descriptor_ops::fcntl(fd, F_GETFL, ec); + if (s >= 0) + s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec); + if (s < 0) + { + asio::error_code ignored_ec; + descriptor_ops::close(fd, state, ignored_ec); + return ec; + } + + // Set up default serial port options. + termios ios; + errno = 0; + s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec); + if (s >= 0) + { +#if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) + ::cfmakeraw(&ios); +#else + ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK + | ISTRIP | INLCR | IGNCR | ICRNL | IXON); + ios.c_oflag &= ~OPOST; + ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); + ios.c_cflag &= ~(CSIZE | PARENB); + ios.c_cflag |= CS8; +#endif + ios.c_iflag |= IGNPAR; + ios.c_cflag |= CREAD | CLOCAL; + errno = 0; + s = descriptor_ops::error_wrapper(::tcsetattr(fd, TCSANOW, &ios), ec); + } + if (s < 0) + { + asio::error_code ignored_ec; + descriptor_ops::close(fd, state, ignored_ec); + return ec; + } + + // We're done. Take ownership of the serial port descriptor. 
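+ // assign() registers the descriptor with the reactor. If registration
+ // fails the descriptor has not been adopted, so close it here.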
+ if (descriptor_service_.assign(impl, fd, ec)) + { + asio::error_code ignored_ec; + descriptor_ops::close(fd, state, ignored_ec); + } + + return ec; +} + +asio::error_code reactive_serial_port_service::do_set_option( + reactive_serial_port_service::implementation_type& impl, + reactive_serial_port_service::store_function_type store, + const void* option, asio::error_code& ec) +{ + termios ios; + errno = 0; + descriptor_ops::error_wrapper(::tcgetattr( + descriptor_service_.native_handle(impl), &ios), ec); + if (ec) + return ec; + + if (store(option, ios, ec)) + return ec; + + errno = 0; + descriptor_ops::error_wrapper(::tcsetattr( + descriptor_service_.native_handle(impl), TCSANOW, &ios), ec); + return ec; +} + +asio::error_code reactive_serial_port_service::do_get_option( + const reactive_serial_port_service::implementation_type& impl, + reactive_serial_port_service::load_function_type load, + void* option, asio::error_code& ec) const +{ + termios ios; + errno = 0; + descriptor_ops::error_wrapper(::tcgetattr( + descriptor_service_.native_handle(impl), &ios), ec); + if (ec) + return ec; + + return load(option, ios, ec); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) +#endif // defined(ASIO_HAS_SERIAL_PORT) + +#endif // ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP diff --git a/lib/asio/detail/impl/reactive_socket_service_base.ipp b/lib/asio/detail/impl/reactive_socket_service_base.ipp new file mode 100644 index 0000000..129b851 --- /dev/null +++ b/lib/asio/detail/impl/reactive_socket_service_base.ipp @@ -0,0 +1,300 @@ +// +// detail/reactive_socket_service_base.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP +#define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if !defined(ASIO_HAS_IOCP) \ + && !defined(ASIO_WINDOWS_RUNTIME) + +#include "asio/detail/reactive_socket_service_base.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +reactive_socket_service_base::reactive_socket_service_base( + asio::io_context& io_context) + : io_context_(io_context), + reactor_(use_service<reactor>(io_context)) +{ + reactor_.init_task(); +} + +void reactive_socket_service_base::base_shutdown() +{ +} + +void reactive_socket_service_base::construct( + reactive_socket_service_base::base_implementation_type& impl) +{ + impl.socket_ = invalid_socket; + impl.state_ = 0; +} + +void reactive_socket_service_base::base_move_construct( + reactive_socket_service_base::base_implementation_type& impl, + reactive_socket_service_base::base_implementation_type& other_impl) +{ + impl.socket_ = other_impl.socket_; + other_impl.socket_ = invalid_socket; + + impl.state_ = other_impl.state_; + other_impl.state_ = 0; + + reactor_.move_descriptor(impl.socket_, + impl.reactor_data_, other_impl.reactor_data_); +} + +void reactive_socket_service_base::base_move_assign( + reactive_socket_service_base::base_implementation_type& impl, + reactive_socket_service_base& other_service, + reactive_socket_service_base::base_implementation_type& other_impl) +{ + destroy(impl); + + impl.socket_ = other_impl.socket_; + other_impl.socket_ = invalid_socket; + + impl.state_ = other_impl.state_; + other_impl.state_ = 0; + + other_service.reactor_.move_descriptor(impl.socket_, + impl.reactor_data_, other_impl.reactor_data_); +} + +void reactive_socket_service_base::destroy( + reactive_socket_service_base::base_implementation_type& impl) +{ + if (impl.socket_ != invalid_socket) + { + ASIO_HANDLER_OPERATION((reactor_.context(), + "socket", &impl, impl.socket_, "close")); + + reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, + (impl.state_ & socket_ops::possible_dup) == 0); + + asio::error_code ignored_ec; + socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); + + reactor_.cleanup_descriptor_data(impl.reactor_data_); + } +} + +asio::error_code reactive_socket_service_base::close( + reactive_socket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((reactor_.context(), + "socket", &impl, impl.socket_, "close")); + + reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, + (impl.state_ & socket_ops::possible_dup) == 0); + + socket_ops::close(impl.socket_, impl.state_, false, ec); + + reactor_.cleanup_descriptor_data(impl.reactor_data_); + } + else + { + ec = asio::error_code(); + } + + // The descriptor is closed by the OS even if close() returns an error. + // + // (Actually, POSIX says the state of the descriptor is unspecified. On + // Linux the descriptor is apparently closed anyway; e.g. see + // http://lkml.org/lkml/2005/9/10/129 + // We'll just have to assume that other OSes follow the same behaviour. The + // known exception is when Windows's closesocket() function fails with + // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close(). 
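+ // Reset the implementation to its default closed state so that the socket
+ // object may subsequently be reopened.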
+ construct(impl); + + return ec; +} + +socket_type reactive_socket_service_base::release( + reactive_socket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return invalid_socket; + } + + ASIO_HANDLER_OPERATION((reactor_.context(), + "socket", &impl, impl.socket_, "release")); + + reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false); + reactor_.cleanup_descriptor_data(impl.reactor_data_); + socket_type sock = impl.socket_; + construct(impl); + ec = asio::error_code(); + return sock; +} + +asio::error_code reactive_socket_service_base::cancel( + reactive_socket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return ec; + } + + ASIO_HANDLER_OPERATION((reactor_.context(), + "socket", &impl, impl.socket_, "cancel")); + + reactor_.cancel_ops(impl.socket_, impl.reactor_data_); + ec = asio::error_code(); + return ec; +} + +asio::error_code reactive_socket_service_base::do_open( + reactive_socket_service_base::base_implementation_type& impl, + int af, int type, int protocol, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + socket_holder sock(socket_ops::socket(af, type, protocol, ec)); + if (sock.get() == invalid_socket) + return ec; + + if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_)) + { + ec = asio::error_code(err, + asio::error::get_system_category()); + return ec; + } + + impl.socket_ = sock.release(); + switch (type) + { + case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; + case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; + default: impl.state_ = 0; break; + } + ec = asio::error_code(); + return ec; +} + +asio::error_code reactive_socket_service_base::do_assign( + reactive_socket_service_base::base_implementation_type& impl, int type, + const reactive_socket_service_base::native_handle_type& native_socket, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + if (int err = reactor_.register_descriptor( + native_socket, impl.reactor_data_)) + { + ec = asio::error_code(err, + asio::error::get_system_category()); + return ec; + } + + impl.socket_ = native_socket; + switch (type) + { + case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; + case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; + default: impl.state_ = 0; break; + } + impl.state_ |= socket_ops::possible_dup; + ec = asio::error_code(); + return ec; +} + +void reactive_socket_service_base::start_op( + reactive_socket_service_base::base_implementation_type& impl, + int op_type, reactor_op* op, bool is_continuation, + bool is_non_blocking, bool noop) +{ + if (!noop) + { + if ((impl.state_ & socket_ops::non_blocking) + || socket_ops::set_internal_non_blocking( + impl.socket_, impl.state_, true, op->ec_)) + { + reactor_.start_op(op_type, impl.socket_, + impl.reactor_data_, op, is_continuation, is_non_blocking); + return; + } + } + + reactor_.post_immediate_completion(op, is_continuation); +} + +void reactive_socket_service_base::start_accept_op( + reactive_socket_service_base::base_implementation_type& impl, + reactor_op* op, bool is_continuation, bool peer_is_open) +{ + if (!peer_is_open) + start_op(impl, reactor::read_op, op, is_continuation, true, false); + else + { + op->ec_ = asio::error::already_open; + 
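+ // Complete immediately so the handler observes the already_open error
+ // without the peer socket ever being registered with the reactor.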
reactor_.post_immediate_completion(op, is_continuation); + } +} + +void reactive_socket_service_base::start_connect_op( + reactive_socket_service_base::base_implementation_type& impl, + reactor_op* op, bool is_continuation, + const socket_addr_type* addr, size_t addrlen) +{ + if ((impl.state_ & socket_ops::non_blocking) + || socket_ops::set_internal_non_blocking( + impl.socket_, impl.state_, true, op->ec_)) + { + if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) + { + if (op->ec_ == asio::error::in_progress + || op->ec_ == asio::error::would_block) + { + op->ec_ = asio::error_code(); + reactor_.start_op(reactor::connect_op, impl.socket_, + impl.reactor_data_, op, is_continuation, false); + return; + } + } + } + + reactor_.post_immediate_completion(op, is_continuation); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // !defined(ASIO_HAS_IOCP) + // && !defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP diff --git a/lib/asio/detail/impl/resolver_service_base.ipp b/lib/asio/detail/impl/resolver_service_base.ipp new file mode 100644 index 0000000..540bb66 --- /dev/null +++ b/lib/asio/detail/impl/resolver_service_base.ipp @@ -0,0 +1,154 @@ +// +// detail/impl/resolver_service_base.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP +#define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" +#include "asio/detail/resolver_service_base.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +class resolver_service_base::work_io_context_runner +{ +public: + work_io_context_runner(asio::io_context& io_context) + : io_context_(io_context) {} + void operator()() { io_context_.run(); } +private: + asio::io_context& io_context_; +}; + +resolver_service_base::resolver_service_base( + asio::io_context& io_context) + : io_context_impl_(asio::use_service<io_context_impl>(io_context)), + work_io_context_(new asio::io_context(-1)), + work_io_context_impl_(asio::use_service< + io_context_impl>(*work_io_context_)), + work_(asio::make_work_guard(*work_io_context_)), + work_thread_(0) +{ +} + +resolver_service_base::~resolver_service_base() +{ + base_shutdown(); +} + +void resolver_service_base::base_shutdown() +{ + work_.reset(); + if (work_io_context_.get()) + { + work_io_context_->stop(); + if (work_thread_.get()) + { + work_thread_->join(); + work_thread_.reset(); + } + work_io_context_.reset(); + } +} + +void resolver_service_base::base_notify_fork( + asio::io_context::fork_event fork_ev) +{ + if (work_thread_.get()) + { + if (fork_ev == asio::io_context::fork_prepare) + { + work_io_context_->stop(); + work_thread_->join(); + } + else + { + work_io_context_->restart(); + work_thread_.reset(new asio::detail::thread( + work_io_context_runner(*work_io_context_))); + } + } +} + +void resolver_service_base::construct( + resolver_service_base::implementation_type& impl) +{ + impl.reset(static_cast<void*>(0), socket_ops::noop_deleter()); +} + +void resolver_service_base::destroy( + resolver_service_base::implementation_type& impl) +{ + 
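+  // Resetting the implementation below drops the shared "cancel token"
+  // that in-flight resolve operations watch through a weak reference,
+  // which in effect cancels them; hence the "cancel" trace label.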
ASIO_HANDLER_OPERATION((io_context_impl_.context(), + "resolver", &impl, 0, "cancel")); + + impl.reset(); +} + +void resolver_service_base::move_construct(implementation_type& impl, + implementation_type& other_impl) +{ + impl = ASIO_MOVE_CAST(implementation_type)(other_impl); +} + +void resolver_service_base::move_assign(implementation_type& impl, + resolver_service_base&, implementation_type& other_impl) +{ + destroy(impl); + impl = ASIO_MOVE_CAST(implementation_type)(other_impl); +} + +void resolver_service_base::cancel( + resolver_service_base::implementation_type& impl) +{ + ASIO_HANDLER_OPERATION((io_context_impl_.context(), + "resolver", &impl, 0, "cancel")); + + impl.reset(static_cast<void*>(0), socket_ops::noop_deleter()); +} + +void resolver_service_base::start_resolve_op(resolve_op* op) +{ + if (ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER, + io_context_impl_.concurrency_hint())) + { + start_work_thread(); + io_context_impl_.work_started(); + work_io_context_impl_.post_immediate_completion(op, false); + } + else + { + op->ec_ = asio::error::operation_not_supported; + io_context_impl_.post_immediate_completion(op, false); + } +} + +void resolver_service_base::start_work_thread() +{ + asio::detail::mutex::scoped_lock lock(mutex_); + if (!work_thread_.get()) + { + work_thread_.reset(new asio::detail::thread( + work_io_context_runner(*work_io_context_))); + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP diff --git a/lib/asio/detail/impl/scheduler.ipp b/lib/asio/detail/impl/scheduler.ipp new file mode 100644 index 0000000..35bc678 --- /dev/null +++ b/lib/asio/detail/impl/scheduler.ipp @@ -0,0 +1,571 @@ +// +// detail/impl/scheduler.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP +#define ASIO_DETAIL_IMPL_SCHEDULER_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#include "asio/detail/concurrency_hint.hpp" +#include "asio/detail/event.hpp" +#include "asio/detail/limits.hpp" +#include "asio/detail/reactor.hpp" +#include "asio/detail/scheduler.hpp" +#include "asio/detail/scheduler_thread_info.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +struct scheduler::task_cleanup +{ + ~task_cleanup() + { + if (this_thread_->private_outstanding_work > 0) + { + asio::detail::increment( + scheduler_->outstanding_work_, + this_thread_->private_outstanding_work); + } + this_thread_->private_outstanding_work = 0; + + // Enqueue the completed operations and reinsert the task at the end of + // the operation queue. 
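+    //
+    // Sketch of the effect (illustrative only):
+    //   shared op_queue_ : [h1, h2]
+    //   private queue    : [h3, h4]
+    //   after this block : [h1, h2, h3, h4, task_operation_]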
+ lock_->lock(); + scheduler_->task_interrupted_ = true; + scheduler_->op_queue_.push(this_thread_->private_op_queue); + scheduler_->op_queue_.push(&scheduler_->task_operation_); + } + + scheduler* scheduler_; + mutex::scoped_lock* lock_; + thread_info* this_thread_; +}; + +struct scheduler::work_cleanup +{ + ~work_cleanup() + { + if (this_thread_->private_outstanding_work > 1) + { + asio::detail::increment( + scheduler_->outstanding_work_, + this_thread_->private_outstanding_work - 1); + } + else if (this_thread_->private_outstanding_work < 1) + { + scheduler_->work_finished(); + } + this_thread_->private_outstanding_work = 0; + +#if defined(ASIO_HAS_THREADS) + if (!this_thread_->private_op_queue.empty()) + { + lock_->lock(); + scheduler_->op_queue_.push(this_thread_->private_op_queue); + } +#endif // defined(ASIO_HAS_THREADS) + } + + scheduler* scheduler_; + mutex::scoped_lock* lock_; + thread_info* this_thread_; +}; + +scheduler::scheduler( + asio::execution_context& ctx, int concurrency_hint) + : asio::detail::execution_context_service_base<scheduler>(ctx), + one_thread_(concurrency_hint == 1 + || !ASIO_CONCURRENCY_HINT_IS_LOCKING( + SCHEDULER, concurrency_hint) + || !ASIO_CONCURRENCY_HINT_IS_LOCKING( + REACTOR_IO, concurrency_hint)), + mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING( + SCHEDULER, concurrency_hint)), + task_(0), + task_interrupted_(true), + outstanding_work_(0), + stopped_(false), + shutdown_(false), + concurrency_hint_(concurrency_hint) +{ + ASIO_HANDLER_TRACKING_INIT; +} + +void scheduler::shutdown() +{ + mutex::scoped_lock lock(mutex_); + shutdown_ = true; + lock.unlock(); + + // Destroy handler objects. + while (!op_queue_.empty()) + { + operation* o = op_queue_.front(); + op_queue_.pop(); + if (o != &task_operation_) + o->destroy(); + } + + // Reset to initial state. 
+ task_ = 0; +} + +void scheduler::init_task() +{ + mutex::scoped_lock lock(mutex_); + if (!shutdown_ && !task_) + { + task_ = &use_service<reactor>(this->context()); + op_queue_.push(&task_operation_); + wake_one_thread_and_unlock(lock); + } +} + +std::size_t scheduler::run(asio::error_code& ec) +{ + ec = asio::error_code(); + if (outstanding_work_ == 0) + { + stop(); + return 0; + } + + thread_info this_thread; + this_thread.private_outstanding_work = 0; + thread_call_stack::context ctx(this, this_thread); + + mutex::scoped_lock lock(mutex_); + + std::size_t n = 0; + for (; do_run_one(lock, this_thread, ec); lock.lock()) + if (n != (std::numeric_limits<std::size_t>::max)()) + ++n; + return n; +} + +std::size_t scheduler::run_one(asio::error_code& ec) +{ + ec = asio::error_code(); + if (outstanding_work_ == 0) + { + stop(); + return 0; + } + + thread_info this_thread; + this_thread.private_outstanding_work = 0; + thread_call_stack::context ctx(this, this_thread); + + mutex::scoped_lock lock(mutex_); + + return do_run_one(lock, this_thread, ec); +} + +std::size_t scheduler::wait_one(long usec, asio::error_code& ec) +{ + ec = asio::error_code(); + if (outstanding_work_ == 0) + { + stop(); + return 0; + } + + thread_info this_thread; + this_thread.private_outstanding_work = 0; + thread_call_stack::context ctx(this, this_thread); + + mutex::scoped_lock lock(mutex_); + + return do_wait_one(lock, this_thread, usec, ec); +} + +std::size_t scheduler::poll(asio::error_code& ec) +{ + ec = asio::error_code(); + if (outstanding_work_ == 0) + { + stop(); + return 0; + } + + thread_info this_thread; + this_thread.private_outstanding_work = 0; + thread_call_stack::context ctx(this, this_thread); + + mutex::scoped_lock lock(mutex_); + +#if defined(ASIO_HAS_THREADS) + // We want to support nested calls to poll() and poll_one(), so any handlers + // that are already on a thread-private queue need to be put on to the main + // queue now. + if (one_thread_) + if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key())) + op_queue_.push(outer_info->private_op_queue); +#endif // defined(ASIO_HAS_THREADS) + + std::size_t n = 0; + for (; do_poll_one(lock, this_thread, ec); lock.lock()) + if (n != (std::numeric_limits<std::size_t>::max)()) + ++n; + return n; +} + +std::size_t scheduler::poll_one(asio::error_code& ec) +{ + ec = asio::error_code(); + if (outstanding_work_ == 0) + { + stop(); + return 0; + } + + thread_info this_thread; + this_thread.private_outstanding_work = 0; + thread_call_stack::context ctx(this, this_thread); + + mutex::scoped_lock lock(mutex_); + +#if defined(ASIO_HAS_THREADS) + // We want to support nested calls to poll() and poll_one(), so any handlers + // that are already on a thread-private queue need to be put on to the main + // queue now. 
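+  //
+  // Illustration only (hypothetical user code, not part of Asio): such
+  // nesting arises when a completion handler drives the context itself:
+  //
+  //   asio::io_context ctx(1); // hint of 1 selects the one_thread_ path
+  //   asio::post(ctx, [&ctx]{ ctx.poll_one(); }); // nested poll
+  //   ctx.poll();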
+ if (one_thread_) + if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key())) + op_queue_.push(outer_info->private_op_queue); +#endif // defined(ASIO_HAS_THREADS) + + return do_poll_one(lock, this_thread, ec); +} + +void scheduler::stop() +{ + mutex::scoped_lock lock(mutex_); + stop_all_threads(lock); +} + +bool scheduler::stopped() const +{ + mutex::scoped_lock lock(mutex_); + return stopped_; +} + +void scheduler::restart() +{ + mutex::scoped_lock lock(mutex_); + stopped_ = false; +} + +void scheduler::compensating_work_started() +{ + thread_info_base* this_thread = thread_call_stack::contains(this); + ++static_cast<thread_info*>(this_thread)->private_outstanding_work; +} + +void scheduler::post_immediate_completion( + scheduler::operation* op, bool is_continuation) +{ +#if defined(ASIO_HAS_THREADS) + if (one_thread_ || is_continuation) + { + if (thread_info_base* this_thread = thread_call_stack::contains(this)) + { + ++static_cast<thread_info*>(this_thread)->private_outstanding_work; + static_cast<thread_info*>(this_thread)->private_op_queue.push(op); + return; + } + } +#else // defined(ASIO_HAS_THREADS) + (void)is_continuation; +#endif // defined(ASIO_HAS_THREADS) + + work_started(); + mutex::scoped_lock lock(mutex_); + op_queue_.push(op); + wake_one_thread_and_unlock(lock); +} + +void scheduler::post_deferred_completion(scheduler::operation* op) +{ +#if defined(ASIO_HAS_THREADS) + if (one_thread_) + { + if (thread_info_base* this_thread = thread_call_stack::contains(this)) + { + static_cast<thread_info*>(this_thread)->private_op_queue.push(op); + return; + } + } +#endif // defined(ASIO_HAS_THREADS) + + mutex::scoped_lock lock(mutex_); + op_queue_.push(op); + wake_one_thread_and_unlock(lock); +} + +void scheduler::post_deferred_completions( + op_queue<scheduler::operation>& ops) +{ + if (!ops.empty()) + { +#if defined(ASIO_HAS_THREADS) + if (one_thread_) + { + if (thread_info_base* this_thread = thread_call_stack::contains(this)) + { + static_cast<thread_info*>(this_thread)->private_op_queue.push(ops); + return; + } + } +#endif // defined(ASIO_HAS_THREADS) + + mutex::scoped_lock lock(mutex_); + op_queue_.push(ops); + wake_one_thread_and_unlock(lock); + } +} + +void scheduler::do_dispatch( + scheduler::operation* op) +{ + work_started(); + mutex::scoped_lock lock(mutex_); + op_queue_.push(op); + wake_one_thread_and_unlock(lock); +} + +void scheduler::abandon_operations( + op_queue<scheduler::operation>& ops) +{ + op_queue<scheduler::operation> ops2; + ops2.push(ops); +} + +std::size_t scheduler::do_run_one(mutex::scoped_lock& lock, + scheduler::thread_info& this_thread, + const asio::error_code& ec) +{ + while (!stopped_) + { + if (!op_queue_.empty()) + { + // Prepare to execute first handler from queue. + operation* o = op_queue_.front(); + op_queue_.pop(); + bool more_handlers = (!op_queue_.empty()); + + if (o == &task_operation_) + { + task_interrupted_ = more_handlers; + + if (more_handlers && !one_thread_) + wakeup_event_.unlock_and_signal_one(lock); + else + lock.unlock(); + + task_cleanup on_exit = { this, &lock, &this_thread }; + (void)on_exit; + + // Run the task. May throw an exception. Only block if the operation + // queue is empty and we're not polling, otherwise we want to return + // as soon as possible. + task_->run(more_handlers ? 
0 : -1, this_thread.private_op_queue); + } + else + { + std::size_t task_result = o->task_result_; + + if (more_handlers && !one_thread_) + wake_one_thread_and_unlock(lock); + else + lock.unlock(); + + // Ensure the count of outstanding work is decremented on block exit. + work_cleanup on_exit = { this, &lock, &this_thread }; + (void)on_exit; + + // Complete the operation. May throw an exception. Deletes the object. + o->complete(this, ec, task_result); + + return 1; + } + } + else + { + wakeup_event_.clear(lock); + wakeup_event_.wait(lock); + } + } + + return 0; +} + +std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock, + scheduler::thread_info& this_thread, long usec, + const asio::error_code& ec) +{ + if (stopped_) + return 0; + + operation* o = op_queue_.front(); + if (o == 0) + { + wakeup_event_.clear(lock); + wakeup_event_.wait_for_usec(lock, usec); + usec = 0; // Wait at most once. + o = op_queue_.front(); + } + + if (o == &task_operation_) + { + op_queue_.pop(); + bool more_handlers = (!op_queue_.empty()); + + task_interrupted_ = more_handlers; + + if (more_handlers && !one_thread_) + wakeup_event_.unlock_and_signal_one(lock); + else + lock.unlock(); + + { + task_cleanup on_exit = { this, &lock, &this_thread }; + (void)on_exit; + + // Run the task. May throw an exception. Only block if the operation + // queue is empty and we're not polling, otherwise we want to return + // as soon as possible. + task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue); + } + + o = op_queue_.front(); + if (o == &task_operation_) + { + if (!one_thread_) + wakeup_event_.maybe_unlock_and_signal_one(lock); + return 0; + } + } + + if (o == 0) + return 0; + + op_queue_.pop(); + bool more_handlers = (!op_queue_.empty()); + + std::size_t task_result = o->task_result_; + + if (more_handlers && !one_thread_) + wake_one_thread_and_unlock(lock); + else + lock.unlock(); + + // Ensure the count of outstanding work is decremented on block exit. + work_cleanup on_exit = { this, &lock, &this_thread }; + (void)on_exit; + + // Complete the operation. May throw an exception. Deletes the object. + o->complete(this, ec, task_result); + + return 1; +} + +std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock, + scheduler::thread_info& this_thread, + const asio::error_code& ec) +{ + if (stopped_) + return 0; + + operation* o = op_queue_.front(); + if (o == &task_operation_) + { + op_queue_.pop(); + lock.unlock(); + + { + task_cleanup c = { this, &lock, &this_thread }; + (void)c; + + // Run the task. May throw an exception. Only block if the operation + // queue is empty and we're not polling, otherwise we want to return + // as soon as possible. + task_->run(0, this_thread.private_op_queue); + } + + o = op_queue_.front(); + if (o == &task_operation_) + { + wakeup_event_.maybe_unlock_and_signal_one(lock); + return 0; + } + } + + if (o == 0) + return 0; + + op_queue_.pop(); + bool more_handlers = (!op_queue_.empty()); + + std::size_t task_result = o->task_result_; + + if (more_handlers && !one_thread_) + wake_one_thread_and_unlock(lock); + else + lock.unlock(); + + // Ensure the count of outstanding work is decremented on block exit. + work_cleanup on_exit = { this, &lock, &this_thread }; + (void)on_exit; + + // Complete the operation. May throw an exception. Deletes the object. 
+ o->complete(this, ec, task_result); + + return 1; +} + +void scheduler::stop_all_threads( + mutex::scoped_lock& lock) +{ + stopped_ = true; + wakeup_event_.signal_all(lock); + + if (!task_interrupted_ && task_) + { + task_interrupted_ = true; + task_->interrupt(); + } +} + +void scheduler::wake_one_thread_and_unlock( + mutex::scoped_lock& lock) +{ + if (!wakeup_event_.maybe_unlock_and_signal_one(lock)) + { + if (!task_interrupted_ && task_) + { + task_interrupted_ = true; + task_->interrupt(); + } + lock.unlock(); + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP diff --git a/lib/asio/detail/impl/select_reactor.hpp b/lib/asio/detail/impl/select_reactor.hpp new file mode 100644 index 0000000..04a04d4 --- /dev/null +++ b/lib/asio/detail/impl/select_reactor.hpp @@ -0,0 +1,100 @@ +// +// detail/impl/select_reactor.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP +#define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) \ + || (!defined(ASIO_HAS_DEV_POLL) \ + && !defined(ASIO_HAS_EPOLL) \ + && !defined(ASIO_HAS_KQUEUE) \ + && !defined(ASIO_WINDOWS_RUNTIME)) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Time_Traits> +void select_reactor::add_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_add_timer_queue(queue); +} + +// Remove a timer queue from the reactor. 
+template <typename Time_Traits> +void select_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_remove_timer_queue(queue); +} + +template <typename Time_Traits> +void select_reactor::schedule_timer(timer_queue<Time_Traits>& queue, + const typename Time_Traits::time_type& time, + typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + scheduler_.post_immediate_completion(op, false); + return; + } + + bool earliest = queue.enqueue_timer(time, timer, op); + scheduler_.work_started(); + if (earliest) + interrupter_.interrupt(); +} + +template <typename Time_Traits> +std::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& timer, + std::size_t max_cancelled) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); + lock.unlock(); + scheduler_.post_deferred_completions(ops); + return n; +} + +template <typename Time_Traits> +void select_reactor::move_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& target, + typename timer_queue<Time_Traits>::per_timer_data& source) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + queue.cancel_timer(target, ops); + queue.move_timer(target, source); + lock.unlock(); + scheduler_.post_deferred_completions(ops); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_IOCP) + // || (!defined(ASIO_HAS_DEV_POLL) + // && !defined(ASIO_HAS_EPOLL) + // && !defined(ASIO_HAS_KQUEUE) + // && !defined(ASIO_WINDOWS_RUNTIME)) + +#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP diff --git a/lib/asio/detail/impl/select_reactor.ipp b/lib/asio/detail/impl/select_reactor.ipp new file mode 100644 index 0000000..262bc69 --- /dev/null +++ b/lib/asio/detail/impl/select_reactor.ipp @@ -0,0 +1,333 @@ +// +// detail/impl/select_reactor.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP +#define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) \ + || (!defined(ASIO_HAS_DEV_POLL) \ + && !defined(ASIO_HAS_EPOLL) \ + && !defined(ASIO_HAS_KQUEUE) \ + && !defined(ASIO_WINDOWS_RUNTIME)) + +#include "asio/detail/fd_set_adapter.hpp" +#include "asio/detail/select_reactor.hpp" +#include "asio/detail/signal_blocker.hpp" +#include "asio/detail/socket_ops.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +#if defined(ASIO_HAS_IOCP) +class select_reactor::thread_function +{ +public: + explicit thread_function(select_reactor* r) + : this_(r) + { + } + + void operator()() + { + this_->run_thread(); + } + +private: + select_reactor* this_; +}; +#endif // defined(ASIO_HAS_IOCP) + +select_reactor::select_reactor(asio::execution_context& ctx) + : execution_context_service_base<select_reactor>(ctx), + scheduler_(use_service<scheduler_type>(ctx)), + mutex_(), + interrupter_(), +#if defined(ASIO_HAS_IOCP) + stop_thread_(false), + thread_(0), +#endif // defined(ASIO_HAS_IOCP) + shutdown_(false) +{ +#if defined(ASIO_HAS_IOCP) + asio::detail::signal_blocker sb; + thread_ = new asio::detail::thread(thread_function(this)); +#endif // defined(ASIO_HAS_IOCP) +} + +select_reactor::~select_reactor() +{ + shutdown(); +} + +void select_reactor::shutdown() +{ + asio::detail::mutex::scoped_lock lock(mutex_); + shutdown_ = true; +#if defined(ASIO_HAS_IOCP) + stop_thread_ = true; +#endif // defined(ASIO_HAS_IOCP) + lock.unlock(); + +#if defined(ASIO_HAS_IOCP) + if (thread_) + { + interrupter_.interrupt(); + thread_->join(); + delete thread_; + thread_ = 0; + } +#endif // defined(ASIO_HAS_IOCP) + + op_queue<operation> ops; + + for (int i = 0; i < max_ops; ++i) + op_queue_[i].get_all_operations(ops); + + timer_queues_.get_all_timers(ops); + + scheduler_.abandon_operations(ops); +} + +void select_reactor::notify_fork( + asio::execution_context::fork_event fork_ev) +{ + if (fork_ev == asio::execution_context::fork_child) + interrupter_.recreate(); +} + +void select_reactor::init_task() +{ + scheduler_.init_task(); +} + +int select_reactor::register_descriptor(socket_type, + select_reactor::per_descriptor_data&) +{ + return 0; +} + +int select_reactor::register_internal_descriptor( + int op_type, socket_type descriptor, + select_reactor::per_descriptor_data&, reactor_op* op) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + op_queue_[op_type].enqueue_operation(descriptor, op); + interrupter_.interrupt(); + + return 0; +} + +void select_reactor::move_descriptor(socket_type, + select_reactor::per_descriptor_data&, + select_reactor::per_descriptor_data&) +{ +} + +void select_reactor::start_op(int op_type, socket_type descriptor, + select_reactor::per_descriptor_data&, reactor_op* op, + bool is_continuation, bool) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + post_immediate_completion(op, is_continuation); + return; + } + + bool first = op_queue_[op_type].enqueue_operation(descriptor, op); + scheduler_.work_started(); + if (first) + interrupter_.interrupt(); +} + +void select_reactor::cancel_ops(socket_type descriptor, + select_reactor::per_descriptor_data&) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + 
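+  // The mutex is held for the duration of the call below, which assumes
+  // that its caller already owns the lock (hence the _unlocked suffix).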
cancel_ops_unlocked(descriptor, asio::error::operation_aborted); +} + +void select_reactor::deregister_descriptor(socket_type descriptor, + select_reactor::per_descriptor_data&, bool) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + cancel_ops_unlocked(descriptor, asio::error::operation_aborted); +} + +void select_reactor::deregister_internal_descriptor( + socket_type descriptor, select_reactor::per_descriptor_data&) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + for (int i = 0; i < max_ops; ++i) + op_queue_[i].cancel_operations(descriptor, ops); +} + +void select_reactor::cleanup_descriptor_data( + select_reactor::per_descriptor_data&) +{ +} + +void select_reactor::run(long usec, op_queue<operation>& ops) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + +#if defined(ASIO_HAS_IOCP) + // Check if the thread is supposed to stop. + if (stop_thread_) + return; +#endif // defined(ASIO_HAS_IOCP) + + // Set up the descriptor sets. + for (int i = 0; i < max_select_ops; ++i) + fd_sets_[i].reset(); + fd_sets_[read_op].set(interrupter_.read_descriptor()); + socket_type max_fd = 0; + bool have_work_to_do = !timer_queues_.all_empty(); + for (int i = 0; i < max_select_ops; ++i) + { + have_work_to_do = have_work_to_do || !op_queue_[i].empty(); + fd_sets_[i].set(op_queue_[i], ops); + if (fd_sets_[i].max_descriptor() > max_fd) + max_fd = fd_sets_[i].max_descriptor(); + } + +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + // Connection operations on Windows use both except and write fd_sets. + have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty(); + fd_sets_[write_op].set(op_queue_[connect_op], ops); + if (fd_sets_[write_op].max_descriptor() > max_fd) + max_fd = fd_sets_[write_op].max_descriptor(); + fd_sets_[except_op].set(op_queue_[connect_op], ops); + if (fd_sets_[except_op].max_descriptor() > max_fd) + max_fd = fd_sets_[except_op].max_descriptor(); +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + + // We can return immediately if there's no work to do and the reactor is + // not supposed to block. + if (!usec && !have_work_to_do) + return; + + // Determine how long to block while waiting for events. + timeval tv_buf = { 0, 0 }; + timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf; + + lock.unlock(); + + // Block on the select call until descriptors become ready. + asio::error_code ec; + int retval = socket_ops::select(static_cast<int>(max_fd + 1), + fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec); + + // Reset the interrupter. + if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor())) + { + interrupter_.reset(); + --retval; + } + + lock.lock(); + + // Dispatch all ready operations. + if (retval > 0) + { +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + // Connection operations on Windows use both except and write fd_sets. + fd_sets_[except_op].perform(op_queue_[connect_op], ops); + fd_sets_[write_op].perform(op_queue_[connect_op], ops); +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + + // Exception operations must be processed first to ensure that any + // out-of-band data is read before normal data. 
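+    // (Iterating the table in reverse visits except_op first, then
+    // write_op, then read_op, since except_op has the highest index of
+    // the three select-able operation types.)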
+    for (int i = max_select_ops - 1; i >= 0; --i)
+      fd_sets_[i].perform(op_queue_[i], ops);
+  }
+  timer_queues_.get_ready_timers(ops);
+}
+
+void select_reactor::interrupt()
+{
+  interrupter_.interrupt();
+}
+
+#if defined(ASIO_HAS_IOCP)
+void select_reactor::run_thread()
+{
+  asio::detail::mutex::scoped_lock lock(mutex_);
+  while (!stop_thread_)
+  {
+    lock.unlock();
+    op_queue<operation> ops;
+    run(-1, ops); // A negative timeout means block; get_timeout() caps it.
+    scheduler_.post_deferred_completions(ops);
+    lock.lock();
+  }
+}
+#endif // defined(ASIO_HAS_IOCP)
+
+void select_reactor::do_add_timer_queue(timer_queue_base& queue)
+{
+  mutex::scoped_lock lock(mutex_);
+  timer_queues_.insert(&queue);
+}
+
+void select_reactor::do_remove_timer_queue(timer_queue_base& queue)
+{
+  mutex::scoped_lock lock(mutex_);
+  timer_queues_.erase(&queue);
+}
+
+timeval* select_reactor::get_timeout(long usec, timeval& tv)
+{
+  // By default we will wait no longer than 5 minutes. This ensures that any
+  // changes to the system clock are detected after at most that interval.
+  const long max_usec = 5 * 60 * 1000 * 1000;
+  usec = timer_queues_.wait_duration_usec(
+      (usec < 0 || max_usec < usec) ? max_usec : usec);
+  tv.tv_sec = usec / 1000000;
+  tv.tv_usec = usec % 1000000;
+  return &tv;
+}
+
+void select_reactor::cancel_ops_unlocked(socket_type descriptor,
+    const asio::error_code& ec)
+{
+  bool need_interrupt = false;
+  op_queue<operation> ops;
+  for (int i = 0; i < max_ops; ++i)
+    need_interrupt = op_queue_[i].cancel_operations(
+        descriptor, ops, ec) || need_interrupt;
+  scheduler_.post_deferred_completions(ops);
+  if (need_interrupt)
+    interrupter_.interrupt();
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_HAS_IOCP)
+       //   || (!defined(ASIO_HAS_DEV_POLL)
+       //       && !defined(ASIO_HAS_EPOLL)
+       //       && !defined(ASIO_HAS_KQUEUE)
+       //       && !defined(ASIO_WINDOWS_RUNTIME))
+
+#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
diff --git a/lib/asio/detail/impl/service_registry.hpp b/lib/asio/detail/impl/service_registry.hpp
new file mode 100644
index 0000000..d4db589
--- /dev/null
+++ b/lib/asio/detail/impl/service_registry.hpp
@@ -0,0 +1,94 @@
+//
+// detail/impl/service_registry.hpp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0.
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP +#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Service> +Service& service_registry::use_service() +{ + execution_context::service::key key; + init_key<Service>(key, 0); + factory_type factory = &service_registry::create<Service, execution_context>; + return *static_cast<Service*>(do_use_service(key, factory, &owner_)); +} + +template <typename Service> +Service& service_registry::use_service(io_context& owner) +{ + execution_context::service::key key; + init_key<Service>(key, 0); + factory_type factory = &service_registry::create<Service, io_context>; + return *static_cast<Service*>(do_use_service(key, factory, &owner)); +} + +template <typename Service> +void service_registry::add_service(Service* new_service) +{ + execution_context::service::key key; + init_key<Service>(key, 0); + return do_add_service(key, new_service); +} + +template <typename Service> +bool service_registry::has_service() const +{ + execution_context::service::key key; + init_key<Service>(key, 0); + return do_has_service(key); +} + +template <typename Service> +inline void service_registry::init_key( + execution_context::service::key& key, ...) +{ + init_key_from_id(key, Service::id); +} + +#if !defined(ASIO_NO_TYPEID) +template <typename Service> +void service_registry::init_key(execution_context::service::key& key, + typename enable_if< + is_base_of<typename Service::key_type, Service>::value>::type*) +{ + key.type_info_ = &typeid(typeid_wrapper<Service>); + key.id_ = 0; +} + +template <typename Service> +void service_registry::init_key_from_id(execution_context::service::key& key, + const service_id<Service>& /*id*/) +{ + key.type_info_ = &typeid(typeid_wrapper<Service>); + key.id_ = 0; +} +#endif // !defined(ASIO_NO_TYPEID) + +template <typename Service, typename Owner> +execution_context::service* service_registry::create(void* owner) +{ + return new Service(*static_cast<Owner*>(owner)); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP diff --git a/lib/asio/detail/impl/service_registry.ipp b/lib/asio/detail/impl/service_registry.ipp new file mode 100644 index 0000000..a465033 --- /dev/null +++ b/lib/asio/detail/impl/service_registry.ipp @@ -0,0 +1,197 @@ +// +// detail/impl/service_registry.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP +#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" +#include <vector> +#include "asio/detail/service_registry.hpp" +#include "asio/detail/throw_exception.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +service_registry::service_registry(execution_context& owner) + : owner_(owner), + first_service_(0) +{ +} + +service_registry::~service_registry() +{ +} + +void service_registry::shutdown_services() +{ + execution_context::service* service = first_service_; + while (service) + { + service->shutdown(); + service = service->next_; + } +} + +void service_registry::destroy_services() +{ + while (first_service_) + { + execution_context::service* next_service = first_service_->next_; + destroy(first_service_); + first_service_ = next_service; + } +} + +void service_registry::notify_fork(execution_context::fork_event fork_ev) +{ + // Make a copy of all of the services while holding the lock. We don't want + // to hold the lock while calling into each service, as it may try to call + // back into this class. + std::vector<execution_context::service*> services; + { + asio::detail::mutex::scoped_lock lock(mutex_); + execution_context::service* service = first_service_; + while (service) + { + services.push_back(service); + service = service->next_; + } + } + + // If processing the fork_prepare event, we want to go in reverse order of + // service registration, which happens to be the existing order of the + // services in the vector. For the other events we want to go in the other + // direction. + std::size_t num_services = services.size(); + if (fork_ev == execution_context::fork_prepare) + for (std::size_t i = 0; i < num_services; ++i) + services[i]->notify_fork(fork_ev); + else + for (std::size_t i = num_services; i > 0; --i) + services[i - 1]->notify_fork(fork_ev); +} + +void service_registry::init_key_from_id(execution_context::service::key& key, + const execution_context::id& id) +{ + key.type_info_ = 0; + key.id_ = &id; +} + +bool service_registry::keys_match( + const execution_context::service::key& key1, + const execution_context::service::key& key2) +{ + if (key1.id_ && key2.id_) + if (key1.id_ == key2.id_) + return true; + if (key1.type_info_ && key2.type_info_) + if (*key1.type_info_ == *key2.type_info_) + return true; + return false; +} + +void service_registry::destroy(execution_context::service* service) +{ + delete service; +} + +execution_context::service* service_registry::do_use_service( + const execution_context::service::key& key, + factory_type factory, void* owner) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + // First see if there is an existing service object with the given key. + execution_context::service* service = first_service_; + while (service) + { + if (keys_match(service->key_, key)) + return service; + service = service->next_; + } + + // Create a new service object. The service registry's mutex is not locked + // at this time to allow for nested calls into this function from the new + // service's constructor. 
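+  //
+  // For example, constructing a select_reactor (see select_reactor.ipp in
+  // this change) itself calls use_service<scheduler_type>(), which
+  // re-enters this function on the same thread.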
+ lock.unlock(); + auto_service_ptr new_service = { factory(owner) }; + new_service.ptr_->key_ = key; + lock.lock(); + + // Check that nobody else created another service object of the same type + // while the lock was released. + service = first_service_; + while (service) + { + if (keys_match(service->key_, key)) + return service; + service = service->next_; + } + + // Service was successfully initialised, pass ownership to registry. + new_service.ptr_->next_ = first_service_; + first_service_ = new_service.ptr_; + new_service.ptr_ = 0; + return first_service_; +} + +void service_registry::do_add_service( + const execution_context::service::key& key, + execution_context::service* new_service) +{ + if (&owner_ != &new_service->context()) + asio::detail::throw_exception(invalid_service_owner()); + + asio::detail::mutex::scoped_lock lock(mutex_); + + // Check if there is an existing service object with the given key. + execution_context::service* service = first_service_; + while (service) + { + if (keys_match(service->key_, key)) + asio::detail::throw_exception(service_already_exists()); + service = service->next_; + } + + // Take ownership of the service object. + new_service->key_ = key; + new_service->next_ = first_service_; + first_service_ = new_service; +} + +bool service_registry::do_has_service( + const execution_context::service::key& key) const +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + execution_context::service* service = first_service_; + while (service) + { + if (keys_match(service->key_, key)) + return true; + service = service->next_; + } + + return false; +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP diff --git a/lib/asio/detail/impl/signal_set_service.ipp b/lib/asio/detail/impl/signal_set_service.ipp new file mode 100644 index 0000000..dd68fc1 --- /dev/null +++ b/lib/asio/detail/impl/signal_set_service.ipp @@ -0,0 +1,669 @@ +// +// detail/impl/signal_set_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP +#define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#include <cstring> +#include <stdexcept> +#include "asio/detail/reactor.hpp" +#include "asio/detail/signal_blocker.hpp" +#include "asio/detail/signal_set_service.hpp" +#include "asio/detail/static_mutex.hpp" +#include "asio/detail/throw_exception.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +struct signal_state +{ + // Mutex used for protecting global state. + static_mutex mutex_; + + // The read end of the pipe used for signal notifications. + int read_descriptor_; + + // The write end of the pipe used for signal notifications. + int write_descriptor_; + + // Whether the signal state has been prepared for a fork. + bool fork_prepared_; + + // The head of a linked list of all signal_set_service instances. + class signal_set_service* service_list_; + + // A count of the number of objects that are registered for each signal. 
+ std::size_t registration_count_[max_signal_number]; +}; + +signal_state* get_signal_state() +{ + static signal_state state = { + ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } }; + return &state; +} + +void asio_signal_handler(int signal_number) +{ +#if defined(ASIO_WINDOWS) \ + || defined(ASIO_WINDOWS_RUNTIME) \ + || defined(__CYGWIN__) + signal_set_service::deliver_signal(signal_number); +#else // defined(ASIO_WINDOWS) + // || defined(ASIO_WINDOWS_RUNTIME) + // || defined(__CYGWIN__) + int saved_errno = errno; + signal_state* state = get_signal_state(); + signed_size_type result = ::write(state->write_descriptor_, + &signal_number, sizeof(signal_number)); + (void)result; + errno = saved_errno; +#endif // defined(ASIO_WINDOWS) + // || defined(ASIO_WINDOWS_RUNTIME) + // || defined(__CYGWIN__) + +#if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) + ::signal(signal_number, asio_signal_handler); +#endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) +} + +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) +class signal_set_service::pipe_read_op : public reactor_op +{ +public: + pipe_read_op() + : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete) + { + } + + static status do_perform(reactor_op*) + { + signal_state* state = get_signal_state(); + + int fd = state->read_descriptor_; + int signal_number = 0; + while (::read(fd, &signal_number, sizeof(int)) == sizeof(int)) + if (signal_number >= 0 && signal_number < max_signal_number) + signal_set_service::deliver_signal(signal_number); + + return not_done; + } + + static void do_complete(void* /*owner*/, operation* base, + const asio::error_code& /*ec*/, + std::size_t /*bytes_transferred*/) + { + pipe_read_op* o(static_cast<pipe_read_op*>(base)); + delete o; + } +}; +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + +signal_set_service::signal_set_service( + asio::io_context& io_context) + : service_base<signal_set_service>(io_context), + io_context_(asio::use_service<io_context_impl>(io_context)), +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + reactor_(asio::use_service<reactor>(io_context)), +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + next_(0), + prev_(0) +{ + get_signal_state()->mutex_.init(); + +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + reactor_.init_task(); +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + + for (int i = 0; i < max_signal_number; ++i) + registrations_[i] = 0; + + add_service(this); +} + +signal_set_service::~signal_set_service() +{ + remove_service(this); +} + +void signal_set_service::shutdown() +{ + remove_service(this); + + op_queue<operation> ops; + + for (int i = 0; i < max_signal_number; ++i) + { + registration* reg = registrations_[i]; + while (reg) + { + ops.push(*reg->queue_); + reg = reg->next_in_table_; + } + } + + io_context_.abandon_operations(ops); +} + +void signal_set_service::notify_fork( + asio::io_context::fork_event fork_ev) +{ +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + + switch (fork_ev) + { + case asio::io_context::fork_prepare: + { + int read_descriptor = state->read_descriptor_; + state->fork_prepared_ = true; + 
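+      // The descriptor was copied out above because the global signal-state
+      // mutex is released before calling into the reactor, which acquires
+      // its own internal lock.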
lock.unlock(); + reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_); + reactor_.cleanup_descriptor_data(reactor_data_); + } + break; + case asio::io_context::fork_parent: + if (state->fork_prepared_) + { + int read_descriptor = state->read_descriptor_; + state->fork_prepared_ = false; + lock.unlock(); + reactor_.register_internal_descriptor(reactor::read_op, + read_descriptor, reactor_data_, new pipe_read_op); + } + break; + case asio::io_context::fork_child: + if (state->fork_prepared_) + { + asio::detail::signal_blocker blocker; + close_descriptors(); + open_descriptors(); + int read_descriptor = state->read_descriptor_; + state->fork_prepared_ = false; + lock.unlock(); + reactor_.register_internal_descriptor(reactor::read_op, + read_descriptor, reactor_data_, new pipe_read_op); + } + break; + default: + break; + } +#else // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + (void)fork_ev; +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) +} + +void signal_set_service::construct( + signal_set_service::implementation_type& impl) +{ + impl.signals_ = 0; +} + +void signal_set_service::destroy( + signal_set_service::implementation_type& impl) +{ + asio::error_code ignored_ec; + clear(impl, ignored_ec); + cancel(impl, ignored_ec); +} + +asio::error_code signal_set_service::add( + signal_set_service::implementation_type& impl, + int signal_number, asio::error_code& ec) +{ + // Check that the signal number is valid. + if (signal_number < 0 || signal_number >= max_signal_number) + { + ec = asio::error::invalid_argument; + return ec; + } + + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + + // Find the appropriate place to insert the registration. + registration** insertion_point = &impl.signals_; + registration* next = impl.signals_; + while (next && next->signal_number_ < signal_number) + { + insertion_point = &next->next_in_set_; + next = next->next_in_set_; + } + + // Only do something if the signal is not already registered. + if (next == 0 || next->signal_number_ != signal_number) + { + registration* new_registration = new registration; + +#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) + // Register for the signal if we're the first. + if (state->registration_count_[signal_number] == 0) + { +# if defined(ASIO_HAS_SIGACTION) + using namespace std; // For memset. + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = asio_signal_handler; + sigfillset(&sa.sa_mask); + if (::sigaction(signal_number, &sa, 0) == -1) +# else // defined(ASIO_HAS_SIGACTION) + if (::signal(signal_number, asio_signal_handler) == SIG_ERR) +# endif // defined(ASIO_HAS_SIGACTION) + { +# if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ec = asio::error::invalid_argument; +# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ec = asio::error_code(errno, + asio::error::get_system_category()); +# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + delete new_registration; + return ec; + } + } +#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) + + // Record the new registration in the set. + new_registration->signal_number_ = signal_number; + new_registration->queue_ = &impl.queue_; + new_registration->next_in_set_ = next; + *insertion_point = new_registration; + + // Insert registration into the registration table. 
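+    // (There is one doubly-linked list per signal number, with its head in
+    // registrations_; new entries are pushed on to the front.)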
+    new_registration->next_in_table_ = registrations_[signal_number];
+    if (registrations_[signal_number])
+      registrations_[signal_number]->prev_in_table_ = new_registration;
+    registrations_[signal_number] = new_registration;
+
+    ++state->registration_count_[signal_number];
+  }
+
+  ec = asio::error_code();
+  return ec;
+}
+
+asio::error_code signal_set_service::remove(
+    signal_set_service::implementation_type& impl,
+    int signal_number, asio::error_code& ec)
+{
+  // Check that the signal number is valid.
+  if (signal_number < 0 || signal_number >= max_signal_number)
+  {
+    ec = asio::error::invalid_argument;
+    return ec;
+  }
+
+  signal_state* state = get_signal_state();
+  static_mutex::scoped_lock lock(state->mutex_);
+
+  // Find the signal number in the list of registrations.
+  registration** deletion_point = &impl.signals_;
+  registration* reg = impl.signals_;
+  while (reg && reg->signal_number_ < signal_number)
+  {
+    deletion_point = &reg->next_in_set_;
+    reg = reg->next_in_set_;
+  }
+
+  if (reg != 0 && reg->signal_number_ == signal_number)
+  {
+#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
+    // Set signal handler back to the default if we're the last.
+    if (state->registration_count_[signal_number] == 1)
+    {
+# if defined(ASIO_HAS_SIGACTION)
+      using namespace std; // For memset.
+      struct sigaction sa;
+      memset(&sa, 0, sizeof(sa));
+      sa.sa_handler = SIG_DFL;
+      if (::sigaction(signal_number, &sa, 0) == -1)
+# else // defined(ASIO_HAS_SIGACTION)
+      if (::signal(signal_number, SIG_DFL) == SIG_ERR)
+# endif // defined(ASIO_HAS_SIGACTION)
+      {
+# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+        ec = asio::error::invalid_argument;
+# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+        ec = asio::error_code(errno,
+            asio::error::get_system_category());
+# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+        return ec;
+      }
+    }
+#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
+
+    // Remove the registration from the set.
+    *deletion_point = reg->next_in_set_;
+
+    // Remove the registration from the registration table.
+    if (registrations_[signal_number] == reg)
+      registrations_[signal_number] = reg->next_in_table_;
+    if (reg->prev_in_table_)
+      reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
+    if (reg->next_in_table_)
+      reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
+
+    --state->registration_count_[signal_number];
+
+    delete reg;
+  }
+
+  ec = asio::error_code();
+  return ec;
+}
+
+asio::error_code signal_set_service::clear(
+    signal_set_service::implementation_type& impl,
+    asio::error_code& ec)
+{
+  signal_state* state = get_signal_state();
+  static_mutex::scoped_lock lock(state->mutex_);
+
+  while (registration* reg = impl.signals_)
+  {
+#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
+    // Set signal handler back to the default if we're the last.
+    if (state->registration_count_[reg->signal_number_] == 1)
+    {
+# if defined(ASIO_HAS_SIGACTION)
+      using namespace std; // For memset.
+ struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = SIG_DFL; + if (::sigaction(reg->signal_number_, &sa, 0) == -1) +# else // defined(ASIO_HAS_SIGACTION) + if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR) +# endif // defined(ASIO_HAS_SIGACTION) + { +# if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ec = asio::error::invalid_argument; +# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ec = asio::error_code(errno, + asio::error::get_system_category()); +# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + return ec; + } + } +#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) + + // Remove the registration from the registration table. + if (registrations_[reg->signal_number_] == reg) + registrations_[reg->signal_number_] = reg->next_in_table_; + if (reg->prev_in_table_) + reg->prev_in_table_->next_in_table_ = reg->next_in_table_; + if (reg->next_in_table_) + reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; + + --state->registration_count_[reg->signal_number_]; + + impl.signals_ = reg->next_in_set_; + delete reg; + } + + ec = asio::error_code(); + return ec; +} + +asio::error_code signal_set_service::cancel( + signal_set_service::implementation_type& impl, + asio::error_code& ec) +{ + ASIO_HANDLER_OPERATION((io_context_.context(), + "signal_set", &impl, 0, "cancel")); + + op_queue<operation> ops; + { + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + + while (signal_op* op = impl.queue_.front()) + { + op->ec_ = asio::error::operation_aborted; + impl.queue_.pop(); + ops.push(op); + } + } + + io_context_.post_deferred_completions(ops); + + ec = asio::error_code(); + return ec; +} + +void signal_set_service::deliver_signal(int signal_number) +{ + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + + signal_set_service* service = state->service_list_; + while (service) + { + op_queue<operation> ops; + + registration* reg = service->registrations_[signal_number]; + while (reg) + { + if (reg->queue_->empty()) + { + ++reg->undelivered_; + } + else + { + while (signal_op* op = reg->queue_->front()) + { + op->signal_number_ = signal_number; + reg->queue_->pop(); + ops.push(op); + } + } + + reg = reg->next_in_table_; + } + + service->io_context_.post_deferred_completions(ops); + + service = service->next_; + } +} + +void signal_set_service::add_service(signal_set_service* service) +{ + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + +#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) + // If this is the first service to be created, open a new pipe. + if (state->service_list_ == 0) + open_descriptors(); +#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) + + // If an io_context object is thread-unsafe then it must be the only + // io_context used to create signal_set objects. + if (state->service_list_ != 0) + { + if (!ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER, + service->io_context_.concurrency_hint()) + || !ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER, + state->service_list_->io_context_.concurrency_hint())) + { + std::logic_error ex( + "Thread-unsafe io_context objects require " + "exclusive access to signal handling."); + asio::detail::throw_exception(ex); + } + } + + // Insert service into linked list of all services. 
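+  // (The new service becomes the head of the list; deliver_signal() walks
+  // this list so that a raised signal reaches every io_context that owns
+  // signal_set objects.)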
+ service->next_ = state->service_list_; + service->prev_ = 0; + if (state->service_list_) + state->service_list_->prev_ = service; + state->service_list_ = service; + +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + // Register for pipe readiness notifications. + int read_descriptor = state->read_descriptor_; + lock.unlock(); + service->reactor_.register_internal_descriptor(reactor::read_op, + read_descriptor, service->reactor_data_, new pipe_read_op); +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) +} + +void signal_set_service::remove_service(signal_set_service* service) +{ + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + + if (service->next_ || service->prev_ || state->service_list_ == service) + { +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + // Disable the pipe readiness notifications. + int read_descriptor = state->read_descriptor_; + lock.unlock(); + service->reactor_.deregister_internal_descriptor( + read_descriptor, service->reactor_data_); + service->reactor_.cleanup_descriptor_data(service->reactor_data_); + lock.lock(); +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) + + // Remove service from linked list of all services. + if (state->service_list_ == service) + state->service_list_ = service->next_; + if (service->prev_) + service->prev_->next_ = service->next_; + if (service->next_) + service->next_->prev_= service->prev_; + service->next_ = 0; + service->prev_ = 0; + +#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) + // If this is the last service to be removed, close the pipe. + if (state->service_list_ == 0) + close_descriptors(); +#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) + } +} + +void signal_set_service::open_descriptors() +{ +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + signal_state* state = get_signal_state(); + + int pipe_fds[2]; + if (::pipe(pipe_fds) == 0) + { + state->read_descriptor_ = pipe_fds[0]; + ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK); + + state->write_descriptor_ = pipe_fds[1]; + ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK); + +#if defined(FD_CLOEXEC) + ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC); + ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC); +#endif // defined(FD_CLOEXEC) + } + else + { + asio::error_code ec(errno, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "signal_set_service pipe"); + } +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) +} + +void signal_set_service::close_descriptors() +{ +#if !defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_RUNTIME) \ + && !defined(__CYGWIN__) + signal_state* state = get_signal_state(); + + if (state->read_descriptor_ != -1) + ::close(state->read_descriptor_); + state->read_descriptor_ = -1; + + if (state->write_descriptor_ != -1) + ::close(state->write_descriptor_); + state->write_descriptor_ = -1; +#endif // !defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_RUNTIME) + // && !defined(__CYGWIN__) +} + +void signal_set_service::start_wait_op( + signal_set_service::implementation_type& impl, signal_op* op) +{ + io_context_.work_started(); + + signal_state* state = get_signal_state(); + static_mutex::scoped_lock lock(state->mutex_); + + registration* reg = impl.signals_; + while 
(reg) + { + if (reg->undelivered_ > 0) + { + --reg->undelivered_; + op->signal_number_ = reg->signal_number_; + io_context_.post_deferred_completion(op); + return; + } + + reg = reg->next_in_set_; + } + + impl.queue_.push(op); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP diff --git a/lib/asio/detail/impl/socket_ops.ipp b/lib/asio/detail/impl/socket_ops.ipp new file mode 100644 index 0000000..5e74733 --- /dev/null +++ b/lib/asio/detail/impl/socket_ops.ipp @@ -0,0 +1,3571 @@ +// +// detail/impl/socket_ops.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_SOCKET_OPS_IPP +#define ASIO_DETAIL_SOCKET_OPS_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#include <cctype> +#include <cstdio> +#include <cstdlib> +#include <cstring> +#include <cerrno> +#include <new> +#include "asio/detail/assert.hpp" +#include "asio/detail/socket_ops.hpp" +#include "asio/error.hpp" + +#if defined(ASIO_WINDOWS_RUNTIME) +# include <codecvt> +# include <locale> +# include <string> +#endif // defined(ASIO_WINDOWS_RUNTIME) + +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \ + || defined(__MACH__) && defined(__APPLE__) +# if defined(ASIO_HAS_PTHREADS) +# include <pthread.h> +# endif // defined(ASIO_HAS_PTHREADS) +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + // || defined(__MACH__) && defined(__APPLE__) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { +namespace socket_ops { + +#if !defined(ASIO_WINDOWS_RUNTIME) + +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) +struct msghdr { int msg_namelen; }; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + +#if defined(__hpux) +// HP-UX doesn't declare these functions extern "C", so they are declared again +// here to avoid linker errors about undefined symbols. +extern "C" char* if_indextoname(unsigned int, char*); +extern "C" unsigned int if_nametoindex(const char*); +#endif // defined(__hpux) + +#endif // !defined(ASIO_WINDOWS_RUNTIME) + +inline void clear_last_error() +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + WSASetLastError(0); +#else + errno = 0; +#endif +} + +#if !defined(ASIO_WINDOWS_RUNTIME) + +template <typename ReturnType> +inline ReturnType error_wrapper(ReturnType return_value, + asio::error_code& ec) +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ec = asio::error_code(WSAGetLastError(), + asio::error::get_system_category()); +#else + ec = asio::error_code(errno, + asio::error::get_system_category()); +#endif + return return_value; +} + +template <typename SockLenType> +inline socket_type call_accept(SockLenType msghdr::*, + socket_type s, socket_addr_type* addr, std::size_t* addrlen) +{ + SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0; + socket_type result = ::accept(s, addr, addrlen ? 
&tmp_addrlen : 0); + if (addrlen) + *addrlen = (std::size_t)tmp_addrlen; + return result; +} + +socket_type accept(socket_type s, socket_addr_type* addr, + std::size_t* addrlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return invalid_socket; + } + + clear_last_error(); + + socket_type new_s = error_wrapper(call_accept( + &msghdr::msg_namelen, s, addr, addrlen), ec); + if (new_s == invalid_socket) + return new_s; + +#if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) + int optval = 1; + int result = error_wrapper(::setsockopt(new_s, + SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); + if (result != 0) + { + ::close(new_s); + return invalid_socket; + } +#endif + + ec = asio::error_code(); + return new_s; +} + +socket_type sync_accept(socket_type s, state_type state, + socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) +{ + // Accept a socket. + for (;;) + { + // Try to complete the operation without blocking. + socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec); + + // Check if operation succeeded. + if (new_socket != invalid_socket) + return new_socket; + + // Operation failed. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + { + if (state & user_set_non_blocking) + return invalid_socket; + // Fall through to retry operation. + } + else if (ec == asio::error::connection_aborted) + { + if (state & enable_connection_aborted) + return invalid_socket; + // Fall through to retry operation. + } +#if defined(EPROTO) + else if (ec.value() == EPROTO) + { + if (state & enable_connection_aborted) + return invalid_socket; + // Fall through to retry operation. + } +#endif // defined(EPROTO) + else + return invalid_socket; + + // Wait for socket to become ready. + if (socket_ops::poll_read(s, 0, -1, ec) < 0) + return invalid_socket; + } +} + +#if defined(ASIO_HAS_IOCP) + +void complete_iocp_accept(socket_type s, + void* output_buffer, DWORD address_length, + socket_addr_type* addr, std::size_t* addrlen, + socket_type new_socket, asio::error_code& ec) +{ + // Map non-portable errors to their portable counterparts. + if (ec.value() == ERROR_NETNAME_DELETED) + ec = asio::error::connection_aborted; + + if (!ec) + { + // Get the address of the peer. + if (addr && addrlen) + { + LPSOCKADDR local_addr = 0; + int local_addr_length = 0; + LPSOCKADDR remote_addr = 0; + int remote_addr_length = 0; + GetAcceptExSockaddrs(output_buffer, 0, address_length, + address_length, &local_addr, &local_addr_length, + &remote_addr, &remote_addr_length); + if (static_cast<std::size_t>(remote_addr_length) > *addrlen) + { + ec = asio::error::invalid_argument; + } + else + { + using namespace std; // For memcpy. + memcpy(addr, remote_addr, remote_addr_length); + *addrlen = static_cast<std::size_t>(remote_addr_length); + } + } + + // Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname + // and getpeername will work on the accepted socket. + SOCKET update_ctx_param = s; + socket_ops::state_type state = 0; + socket_ops::setsockopt(new_socket, state, + SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, + &update_ctx_param, sizeof(SOCKET), ec); + } +} + +#else // defined(ASIO_HAS_IOCP) + +bool non_blocking_accept(socket_type s, + state_type state, socket_addr_type* addr, std::size_t* addrlen, + asio::error_code& ec, socket_type& new_socket) +{ + for (;;) + { + // Accept the waiting connection. + new_socket = socket_ops::accept(s, addr, addrlen, ec); + + // Check if operation succeeded. 
+ if (new_socket != invalid_socket) + return true; + + // Retry operation if interrupted by signal. + if (ec == asio::error::interrupted) + continue; + + // Operation failed. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + { + // Fall through to retry operation. + } + else if (ec == asio::error::connection_aborted) + { + if (state & enable_connection_aborted) + return true; + // Fall through to retry operation. + } +#if defined(EPROTO) + else if (ec.value() == EPROTO) + { + if (state & enable_connection_aborted) + return true; + // Fall through to retry operation. + } +#endif // defined(EPROTO) + else + return true; + + return false; + } +} + +#endif // defined(ASIO_HAS_IOCP) + +template <typename SockLenType> +inline int call_bind(SockLenType msghdr::*, + socket_type s, const socket_addr_type* addr, std::size_t addrlen) +{ + return ::bind(s, addr, (SockLenType)addrlen); +} + +int bind(socket_type s, const socket_addr_type* addr, + std::size_t addrlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + clear_last_error(); + int result = error_wrapper(call_bind( + &msghdr::msg_namelen, s, addr, addrlen), ec); + if (result == 0) + ec = asio::error_code(); + return result; +} + +int close(socket_type s, state_type& state, + bool destruction, asio::error_code& ec) +{ + int result = 0; + if (s != invalid_socket) + { + // We don't want the destructor to block, so set the socket to linger in + // the background. If the user doesn't like this behaviour then they need + // to explicitly close the socket. + if (destruction && (state & user_set_linger)) + { + ::linger opt; + opt.l_onoff = 0; + opt.l_linger = 0; + asio::error_code ignored_ec; + socket_ops::setsockopt(s, state, SOL_SOCKET, + SO_LINGER, &opt, sizeof(opt), ignored_ec); + } + + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + result = error_wrapper(::closesocket(s), ec); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + result = error_wrapper(::close(s), ec); +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + + if (result != 0 + && (ec == asio::error::would_block + || ec == asio::error::try_again)) + { + // According to UNIX Network Programming Vol. 1, it is possible for + // close() to fail with EWOULDBLOCK under certain circumstances. What + // isn't clear is the state of the descriptor after this error. The one + // current OS where this behaviour is seen, Windows, says that the socket + // remains open. Therefore we'll put the descriptor back into blocking + // mode and have another attempt at closing it. 
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ioctl_arg_type arg = 0; + ::ioctlsocket(s, FIONBIO, &arg); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +# if defined(__SYMBIAN32__) + int flags = ::fcntl(s, F_GETFL, 0); + if (flags >= 0) + ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK); +# else // defined(__SYMBIAN32__) + ioctl_arg_type arg = 0; + ::ioctl(s, FIONBIO, &arg); +# endif // defined(__SYMBIAN32__) +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + state &= ~non_blocking; + + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + result = error_wrapper(::closesocket(s), ec); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + result = error_wrapper(::close(s), ec); +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + } + } + + if (result == 0) + ec = asio::error_code(); + return result; +} + +bool set_user_non_blocking(socket_type s, + state_type& state, bool value, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return false; + } + + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ioctl_arg_type arg = (value ? 1 : 0); + int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec); +#elif defined(__SYMBIAN32__) + int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec); + if (result >= 0) + { + clear_last_error(); + int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); + result = error_wrapper(::fcntl(s, F_SETFL, flag), ec); + } +#else + ioctl_arg_type arg = (value ? 1 : 0); + int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec); +#endif + + if (result >= 0) + { + ec = asio::error_code(); + if (value) + state |= user_set_non_blocking; + else + { + // Clearing the user-set non-blocking mode always overrides any + // internally-set non-blocking flag. Any subsequent asynchronous + // operations will need to re-enable non-blocking I/O. + state &= ~(user_set_non_blocking | internal_non_blocking); + } + return true; + } + + return false; +} + +bool set_internal_non_blocking(socket_type s, + state_type& state, bool value, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return false; + } + + if (!value && (state & user_set_non_blocking)) + { + // It does not make sense to clear the internal non-blocking flag if the + // user still wants non-blocking behaviour. Return an error and let the + // caller figure out whether to update the user-set non-blocking flag. + ec = asio::error::invalid_argument; + return false; + } + + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + ioctl_arg_type arg = (value ? 1 : 0); + int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec); +#elif defined(__SYMBIAN32__) + int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec); + if (result >= 0) + { + clear_last_error(); + int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); + result = error_wrapper(::fcntl(s, F_SETFL, flag), ec); + } +#else + ioctl_arg_type arg = (value ? 
1 : 0); + int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec); +#endif + + if (result >= 0) + { + ec = asio::error_code(); + if (value) + state |= internal_non_blocking; + else + state &= ~internal_non_blocking; + return true; + } + + return false; +} + +int shutdown(socket_type s, int what, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + clear_last_error(); + int result = error_wrapper(::shutdown(s, what), ec); + if (result == 0) + ec = asio::error_code(); + return result; +} + +template <typename SockLenType> +inline int call_connect(SockLenType msghdr::*, + socket_type s, const socket_addr_type* addr, std::size_t addrlen) +{ + return ::connect(s, addr, (SockLenType)addrlen); +} + +int connect(socket_type s, const socket_addr_type* addr, + std::size_t addrlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + clear_last_error(); + int result = error_wrapper(call_connect( + &msghdr::msg_namelen, s, addr, addrlen), ec); + if (result == 0) + ec = asio::error_code(); +#if defined(__linux__) + else if (ec == asio::error::try_again) + ec = asio::error::no_buffer_space; +#endif // defined(__linux__) + return result; +} + +void sync_connect(socket_type s, const socket_addr_type* addr, + std::size_t addrlen, asio::error_code& ec) +{ + // Perform the connect operation. + socket_ops::connect(s, addr, addrlen, ec); + if (ec != asio::error::in_progress + && ec != asio::error::would_block) + { + // The connect operation finished immediately. + return; + } + + // Wait for socket to become ready. + if (socket_ops::poll_connect(s, -1, ec) < 0) + return; + + // Get the error code from the connect operation. + int connect_error = 0; + size_t connect_error_len = sizeof(connect_error); + if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR, + &connect_error, &connect_error_len, ec) == socket_error_retval) + return; + + // Return the result of the connect operation. + ec = asio::error_code(connect_error, + asio::error::get_system_category()); +} + +#if defined(ASIO_HAS_IOCP) + +void complete_iocp_connect(socket_type s, asio::error_code& ec) +{ + // Map non-portable errors to their portable counterparts. + switch (ec.value()) + { + case ERROR_CONNECTION_REFUSED: + ec = asio::error::connection_refused; + break; + case ERROR_NETWORK_UNREACHABLE: + ec = asio::error::network_unreachable; + break; + case ERROR_HOST_UNREACHABLE: + ec = asio::error::host_unreachable; + break; + case ERROR_SEM_TIMEOUT: + ec = asio::error::timed_out; + break; + default: + break; + } + + if (!ec) + { + // Need to set the SO_UPDATE_CONNECT_CONTEXT option so that getsockname + // and getpeername will work on the connected socket. + socket_ops::state_type state = 0; + const int so_update_connect_context = 0x7010; + socket_ops::setsockopt(s, state, SOL_SOCKET, + so_update_connect_context, 0, 0, ec); + } +} + +#endif // defined(ASIO_HAS_IOCP) + +bool non_blocking_connect(socket_type s, asio::error_code& ec) +{ + // Check if the connect operation has finished. This is required since we may + // get spurious readiness notifications from the reactor. 
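+  // (Illustrative only: the same readiness-then-SO_ERROR protocol, written
+  // against the raw BSD sockets API with a hypothetical descriptor fd, is:
+  //
+  //   int err = 0;
+  //   socklen_t len = sizeof(err);
+  //   if (::getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0
+  //       && err == 0)
+  //   {
+  //     // The connection was established.
+  //   }
+  //
+  // A writability event alone is not sufficient, which is why the pending
+  // result of the connect is always read back through SO_ERROR below.)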
+#if defined(ASIO_WINDOWS) \ + || defined(__CYGWIN__) \ + || defined(__SYMBIAN32__) + fd_set write_fds; + FD_ZERO(&write_fds); + FD_SET(s, &write_fds); + fd_set except_fds; + FD_ZERO(&except_fds); + FD_SET(s, &except_fds); + timeval zero_timeout; + zero_timeout.tv_sec = 0; + zero_timeout.tv_usec = 0; + int ready = ::select(s + 1, 0, &write_fds, &except_fds, &zero_timeout); +#else // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + pollfd fds; + fds.fd = s; + fds.events = POLLOUT; + fds.revents = 0; + int ready = ::poll(&fds, 1, 0); +#endif // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + if (ready == 0) + { + // The asynchronous connect operation is still in progress. + return false; + } + + // Get the error code from the connect operation. + int connect_error = 0; + size_t connect_error_len = sizeof(connect_error); + if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR, + &connect_error, &connect_error_len, ec) == 0) + { + if (connect_error) + { + ec = asio::error_code(connect_error, + asio::error::get_system_category()); + } + else + ec = asio::error_code(); + } + + return true; +} + +int socketpair(int af, int type, int protocol, + socket_type sv[2], asio::error_code& ec) +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + (void)(af); + (void)(type); + (void)(protocol); + (void)(sv); + ec = asio::error::operation_not_supported; + return socket_error_retval; +#else + clear_last_error(); + int result = error_wrapper(::socketpair(af, type, protocol, sv), ec); + if (result == 0) + ec = asio::error_code(); + return result; +#endif +} + +bool sockatmark(socket_type s, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return false; + } + +#if defined(SIOCATMARK) + ioctl_arg_type value = 0; +# if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + int result = error_wrapper(::ioctlsocket(s, SIOCATMARK, &value), ec); +# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + int result = error_wrapper(::ioctl(s, SIOCATMARK, &value), ec); +# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + if (result == 0) + ec = asio::error_code(); +# if defined(ENOTTY) + if (ec.value() == ENOTTY) + ec = asio::error::not_socket; +# endif // defined(ENOTTY) +#else // defined(SIOCATMARK) + int value = error_wrapper(::sockatmark(s), ec); + if (value != -1) + ec = asio::error_code(); +#endif // defined(SIOCATMARK) + + return ec ? false : value != 0; +} + +size_t available(socket_type s, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return 0; + } + + ioctl_arg_type value = 0; +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + int result = error_wrapper(::ioctlsocket(s, FIONREAD, &value), ec); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + int result = error_wrapper(::ioctl(s, FIONREAD, &value), ec); +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + if (result == 0) + ec = asio::error_code(); +#if defined(ENOTTY) + if (ec.value() == ENOTTY) + ec = asio::error::not_socket; +#endif // defined(ENOTTY) + + return ec ? 
static_cast<size_t>(0) : static_cast<size_t>(value); +} + +int listen(socket_type s, int backlog, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + clear_last_error(); + int result = error_wrapper(::listen(s, backlog), ec); + if (result == 0) + ec = asio::error_code(); + return result; +} + +inline void init_buf_iov_base(void*& base, void* addr) +{ + base = addr; +} + +template <typename T> +inline void init_buf_iov_base(T& base, void* addr) +{ + base = static_cast<T>(addr); +} + +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) +typedef WSABUF buf; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +typedef iovec buf; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + +void init_buf(buf& b, void* data, size_t size) +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + b.buf = static_cast<char*>(data); + b.len = static_cast<u_long>(size); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + init_buf_iov_base(b.iov_base, data); + b.iov_len = size; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +void init_buf(buf& b, const void* data, size_t size) +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + b.buf = static_cast<char*>(const_cast<void*>(data)); + b.len = static_cast<u_long>(size); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + init_buf_iov_base(b.iov_base, const_cast<void*>(data)); + b.iov_len = size; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +inline void init_msghdr_msg_name(void*& name, socket_addr_type* addr) +{ + name = addr; +} + +inline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr) +{ + name = const_cast<socket_addr_type*>(addr); +} + +template <typename T> +inline void init_msghdr_msg_name(T& name, socket_addr_type* addr) +{ + name = reinterpret_cast<T>(addr); +} + +template <typename T> +inline void init_msghdr_msg_name(T& name, const socket_addr_type* addr) +{ + name = reinterpret_cast<T>(const_cast<socket_addr_type*>(addr)); +} + +signed_size_type recv(socket_type s, buf* bufs, size_t count, + int flags, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + // Receive some data. + DWORD recv_buf_count = static_cast<DWORD>(count); + DWORD bytes_transferred = 0; + DWORD recv_flags = flags; + int result = error_wrapper(::WSARecv(s, bufs, + recv_buf_count, &bytes_transferred, &recv_flags, 0, 0), ec); + if (ec.value() == ERROR_NETNAME_DELETED) + ec = asio::error::connection_reset; + else if (ec.value() == ERROR_PORT_UNREACHABLE) + ec = asio::error::connection_refused; + else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA) + ec.assign(0, ec.category()); + if (result != 0) + return socket_error_retval; + ec = asio::error_code(); + return bytes_transferred; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + msghdr msg = msghdr(); + msg.msg_iov = bufs; + msg.msg_iovlen = static_cast<int>(count); + signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec); + if (result >= 0) + ec = asio::error_code(); + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +size_t sync_recv(socket_type s, state_type state, buf* bufs, + size_t count, int flags, bool all_empty, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // A request to read 0 bytes on a stream is a no-op. 
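+  // (Short-circuiting here also keeps a zero-length read from reaching the
+  // end-of-file check below, where a return value of 0 from recv would be
+  // indistinguishable from the peer closing the connection.)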
+ if (all_empty && (state & stream_oriented)) + { + ec = asio::error_code(); + return 0; + } + + // Read some data. + for (;;) + { + // Try to complete the operation without blocking. + signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec); + + // Check if operation succeeded. + if (bytes > 0) + return bytes; + + // Check for EOF. + if ((state & stream_oriented) && bytes == 0) + { + ec = asio::error::eof; + return 0; + } + + // Operation failed. + if ((state & user_set_non_blocking) + || (ec != asio::error::would_block + && ec != asio::error::try_again)) + return 0; + + // Wait for socket to become ready. + if (socket_ops::poll_read(s, 0, -1, ec) < 0) + return 0; + } +} + +#if defined(ASIO_HAS_IOCP) + +void complete_iocp_recv(state_type state, + const weak_cancel_token_type& cancel_token, bool all_empty, + asio::error_code& ec, size_t bytes_transferred) +{ + // Map non-portable errors to their portable counterparts. + if (ec.value() == ERROR_NETNAME_DELETED) + { + if (cancel_token.expired()) + ec = asio::error::operation_aborted; + else + ec = asio::error::connection_reset; + } + else if (ec.value() == ERROR_PORT_UNREACHABLE) + { + ec = asio::error::connection_refused; + } + else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA) + { + ec.assign(0, ec.category()); + } + + // Check for connection closed. + else if (!ec && bytes_transferred == 0 + && (state & stream_oriented) != 0 + && !all_empty) + { + ec = asio::error::eof; + } +} + +#else // defined(ASIO_HAS_IOCP) + +bool non_blocking_recv(socket_type s, + buf* bufs, size_t count, int flags, bool is_stream, + asio::error_code& ec, size_t& bytes_transferred) +{ + for (;;) + { + // Read some data. + signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec); + + // Check for end of stream. + if (is_stream && bytes == 0) + { + ec = asio::error::eof; + return true; + } + + // Retry operation if interrupted by signal. + if (ec == asio::error::interrupted) + continue; + + // Check if we need to run the operation again. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + return false; + + // Operation is complete. + if (bytes >= 0) + { + ec = asio::error_code(); + bytes_transferred = bytes; + } + else + bytes_transferred = 0; + + return true; + } +} + +#endif // defined(ASIO_HAS_IOCP) + +signed_size_type recvfrom(socket_type s, buf* bufs, size_t count, + int flags, socket_addr_type* addr, std::size_t* addrlen, + asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + // Receive some data. 
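+  // (WSARecvFrom treats its flags argument as in/out and may report, for
+  // example, MSG_PARTIAL on return, so a writable local copy is passed
+  // rather than the caller's value.)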
+ DWORD recv_buf_count = static_cast<DWORD>(count); + DWORD bytes_transferred = 0; + DWORD recv_flags = flags; + int tmp_addrlen = (int)*addrlen; + int result = error_wrapper(::WSARecvFrom(s, bufs, recv_buf_count, + &bytes_transferred, &recv_flags, addr, &tmp_addrlen, 0, 0), ec); + *addrlen = (std::size_t)tmp_addrlen; + if (ec.value() == ERROR_NETNAME_DELETED) + ec = asio::error::connection_reset; + else if (ec.value() == ERROR_PORT_UNREACHABLE) + ec = asio::error::connection_refused; + else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA) + ec.assign(0, ec.category()); + if (result != 0) + return socket_error_retval; + ec = asio::error_code(); + return bytes_transferred; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + msghdr msg = msghdr(); + init_msghdr_msg_name(msg.msg_name, addr); + msg.msg_namelen = static_cast<int>(*addrlen); + msg.msg_iov = bufs; + msg.msg_iovlen = static_cast<int>(count); + signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec); + *addrlen = msg.msg_namelen; + if (result >= 0) + ec = asio::error_code(); + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, + size_t count, int flags, socket_addr_type* addr, + std::size_t* addrlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // Read some data. + for (;;) + { + // Try to complete the operation without blocking. + signed_size_type bytes = socket_ops::recvfrom( + s, bufs, count, flags, addr, addrlen, ec); + + // Check if operation succeeded. + if (bytes >= 0) + return bytes; + + // Operation failed. + if ((state & user_set_non_blocking) + || (ec != asio::error::would_block + && ec != asio::error::try_again)) + return 0; + + // Wait for socket to become ready. + if (socket_ops::poll_read(s, 0, -1, ec) < 0) + return 0; + } +} + +#if defined(ASIO_HAS_IOCP) + +void complete_iocp_recvfrom( + const weak_cancel_token_type& cancel_token, + asio::error_code& ec) +{ + // Map non-portable errors to their portable counterparts. + if (ec.value() == ERROR_NETNAME_DELETED) + { + if (cancel_token.expired()) + ec = asio::error::operation_aborted; + else + ec = asio::error::connection_reset; + } + else if (ec.value() == ERROR_PORT_UNREACHABLE) + { + ec = asio::error::connection_refused; + } + else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA) + { + ec.assign(0, ec.category()); + } +} + +#else // defined(ASIO_HAS_IOCP) + +bool non_blocking_recvfrom(socket_type s, + buf* bufs, size_t count, int flags, + socket_addr_type* addr, std::size_t* addrlen, + asio::error_code& ec, size_t& bytes_transferred) +{ + for (;;) + { + // Read some data. + signed_size_type bytes = socket_ops::recvfrom( + s, bufs, count, flags, addr, addrlen, ec); + + // Retry operation if interrupted by signal. + if (ec == asio::error::interrupted) + continue; + + // Check if we need to run the operation again. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + return false; + + // Operation is complete. 
+ if (bytes >= 0) + { + ec = asio::error_code(); + bytes_transferred = bytes; + } + else + bytes_transferred = 0; + + return true; + } +} + +#endif // defined(ASIO_HAS_IOCP) + +signed_size_type recvmsg(socket_type s, buf* bufs, size_t count, + int in_flags, int& out_flags, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + out_flags = 0; + return socket_ops::recv(s, bufs, count, in_flags, ec); +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + msghdr msg = msghdr(); + msg.msg_iov = bufs; + msg.msg_iovlen = static_cast<int>(count); + signed_size_type result = error_wrapper(::recvmsg(s, &msg, in_flags), ec); + if (result >= 0) + { + ec = asio::error_code(); + out_flags = msg.msg_flags; + } + else + out_flags = 0; + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +size_t sync_recvmsg(socket_type s, state_type state, + buf* bufs, size_t count, int in_flags, int& out_flags, + asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // Read some data. + for (;;) + { + // Try to complete the operation without blocking. + signed_size_type bytes = socket_ops::recvmsg( + s, bufs, count, in_flags, out_flags, ec); + + // Check if operation succeeded. + if (bytes >= 0) + return bytes; + + // Operation failed. + if ((state & user_set_non_blocking) + || (ec != asio::error::would_block + && ec != asio::error::try_again)) + return 0; + + // Wait for socket to become ready. + if (socket_ops::poll_read(s, 0, -1, ec) < 0) + return 0; + } +} + +#if defined(ASIO_HAS_IOCP) + +void complete_iocp_recvmsg( + const weak_cancel_token_type& cancel_token, + asio::error_code& ec) +{ + // Map non-portable errors to their portable counterparts. + if (ec.value() == ERROR_NETNAME_DELETED) + { + if (cancel_token.expired()) + ec = asio::error::operation_aborted; + else + ec = asio::error::connection_reset; + } + else if (ec.value() == ERROR_PORT_UNREACHABLE) + { + ec = asio::error::connection_refused; + } + else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA) + { + ec.assign(0, ec.category()); + } +} + +#else // defined(ASIO_HAS_IOCP) + +bool non_blocking_recvmsg(socket_type s, + buf* bufs, size_t count, int in_flags, int& out_flags, + asio::error_code& ec, size_t& bytes_transferred) +{ + for (;;) + { + // Read some data. + signed_size_type bytes = socket_ops::recvmsg( + s, bufs, count, in_flags, out_flags, ec); + + // Retry operation if interrupted by signal. + if (ec == asio::error::interrupted) + continue; + + // Check if we need to run the operation again. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + return false; + + // Operation is complete. + if (bytes >= 0) + { + ec = asio::error_code(); + bytes_transferred = bytes; + } + else + bytes_transferred = 0; + + return true; + } +} + +#endif // defined(ASIO_HAS_IOCP) + +signed_size_type send(socket_type s, const buf* bufs, size_t count, + int flags, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + // Send the data. 
+  DWORD send_buf_count = static_cast<DWORD>(count);
+  DWORD bytes_transferred = 0;
+  DWORD send_flags = flags;
+  int result = error_wrapper(::WSASend(s, const_cast<buf*>(bufs),
+        send_buf_count, &bytes_transferred, send_flags, 0, 0), ec);
+  if (ec.value() == ERROR_NETNAME_DELETED)
+    ec = asio::error::connection_reset;
+  else if (ec.value() == ERROR_PORT_UNREACHABLE)
+    ec = asio::error::connection_refused;
+  if (result != 0)
+    return socket_error_retval;
+  ec = asio::error_code();
+  return bytes_transferred;
+#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+  msghdr msg = msghdr();
+  msg.msg_iov = const_cast<buf*>(bufs);
+  msg.msg_iovlen = static_cast<int>(count);
+#if defined(__linux__)
+  // Suppress SIGPIPE when writing to a disconnected peer. BSD-based
+  // platforms achieve the same effect via SO_NOSIGPIPE at socket creation.
+  flags |= MSG_NOSIGNAL;
+#endif // defined(__linux__)
+  signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec);
+  if (result >= 0)
+    ec = asio::error_code();
+  return result;
+#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_send(socket_type s, state_type state, const buf* bufs,
+    size_t count, int flags, bool all_empty, asio::error_code& ec)
+{
+  if (s == invalid_socket)
+  {
+    ec = asio::error::bad_descriptor;
+    return 0;
+  }
+
+  // A request to write 0 bytes to a stream is a no-op.
+  if (all_empty && (state & stream_oriented))
+  {
+    ec = asio::error_code();
+    return 0;
+  }
+
+  // Write some data.
+  for (;;)
+  {
+    // Try to complete the operation without blocking.
+    signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);
+
+    // Check if operation succeeded.
+    if (bytes >= 0)
+      return bytes;
+
+    // Operation failed.
+    if ((state & user_set_non_blocking)
+        || (ec != asio::error::would_block
+          && ec != asio::error::try_again))
+      return 0;
+
+    // Wait for socket to become ready.
+    if (socket_ops::poll_write(s, 0, -1, ec) < 0)
+      return 0;
+  }
+}
+
+#if defined(ASIO_HAS_IOCP)
+
+void complete_iocp_send(
+    const weak_cancel_token_type& cancel_token,
+    asio::error_code& ec)
+{
+  // Map non-portable errors to their portable counterparts.
+  if (ec.value() == ERROR_NETNAME_DELETED)
+  {
+    if (cancel_token.expired())
+      ec = asio::error::operation_aborted;
+    else
+      ec = asio::error::connection_reset;
+  }
+  else if (ec.value() == ERROR_PORT_UNREACHABLE)
+  {
+    ec = asio::error::connection_refused;
+  }
+}
+
+#else // defined(ASIO_HAS_IOCP)
+
+bool non_blocking_send(socket_type s,
+    const buf* bufs, size_t count, int flags,
+    asio::error_code& ec, size_t& bytes_transferred)
+{
+  for (;;)
+  {
+    // Write some data.
+    signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);
+
+    // Retry operation if interrupted by signal.
+    if (ec == asio::error::interrupted)
+      continue;
+
+    // Check if we need to run the operation again.
+    if (ec == asio::error::would_block
+        || ec == asio::error::try_again)
+      return false;
+
+    // Operation is complete.
+    if (bytes >= 0)
+    {
+      ec = asio::error_code();
+      bytes_transferred = bytes;
+    }
+    else
+      bytes_transferred = 0;
+
+    return true;
+  }
+}
+
+#endif // defined(ASIO_HAS_IOCP)
+
+signed_size_type sendto(socket_type s, const buf* bufs, size_t count,
+    int flags, const socket_addr_type* addr, std::size_t addrlen,
+    asio::error_code& ec)
+{
+  clear_last_error();
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+  // Send the data.
+ DWORD send_buf_count = static_cast<DWORD>(count); + DWORD bytes_transferred = 0; + int result = error_wrapper(::WSASendTo(s, const_cast<buf*>(bufs), + send_buf_count, &bytes_transferred, flags, addr, + static_cast<int>(addrlen), 0, 0), ec); + if (ec.value() == ERROR_NETNAME_DELETED) + ec = asio::error::connection_reset; + else if (ec.value() == ERROR_PORT_UNREACHABLE) + ec = asio::error::connection_refused; + if (result != 0) + return socket_error_retval; + ec = asio::error_code(); + return bytes_transferred; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + msghdr msg = msghdr(); + init_msghdr_msg_name(msg.msg_name, addr); + msg.msg_namelen = static_cast<int>(addrlen); + msg.msg_iov = const_cast<buf*>(bufs); + msg.msg_iovlen = static_cast<int>(count); +#if defined(__linux__) + flags |= MSG_NOSIGNAL; +#endif // defined(__linux__) + signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec); + if (result >= 0) + ec = asio::error_code(); + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +size_t sync_sendto(socket_type s, state_type state, const buf* bufs, + size_t count, int flags, const socket_addr_type* addr, + std::size_t addrlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // Write some data. + for (;;) + { + // Try to complete the operation without blocking. + signed_size_type bytes = socket_ops::sendto( + s, bufs, count, flags, addr, addrlen, ec); + + // Check if operation succeeded. + if (bytes >= 0) + return bytes; + + // Operation failed. + if ((state & user_set_non_blocking) + || (ec != asio::error::would_block + && ec != asio::error::try_again)) + return 0; + + // Wait for socket to become ready. + if (socket_ops::poll_write(s, 0, -1, ec) < 0) + return 0; + } +} + +#if !defined(ASIO_HAS_IOCP) + +bool non_blocking_sendto(socket_type s, + const buf* bufs, size_t count, int flags, + const socket_addr_type* addr, std::size_t addrlen, + asio::error_code& ec, size_t& bytes_transferred) +{ + for (;;) + { + // Write some data. + signed_size_type bytes = socket_ops::sendto( + s, bufs, count, flags, addr, addrlen, ec); + + // Retry operation if interrupted by signal. + if (ec == asio::error::interrupted) + continue; + + // Check if we need to run the operation again. + if (ec == asio::error::would_block + || ec == asio::error::try_again) + return false; + + // Operation is complete. + if (bytes >= 0) + { + ec = asio::error_code(); + bytes_transferred = bytes; + } + else + bytes_transferred = 0; + + return true; + } +} + +#endif // !defined(ASIO_HAS_IOCP) + +socket_type socket(int af, int type, int protocol, + asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + socket_type s = error_wrapper(::WSASocketW(af, type, protocol, 0, 0, + WSA_FLAG_OVERLAPPED), ec); + if (s == invalid_socket) + return s; + + if (af == ASIO_OS_DEF(AF_INET6)) + { + // Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to + // false. This will only succeed on Windows Vista and later versions of + // Windows, where a dual-stack IPv4/v6 implementation is available. 
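+    // With IPV6_V6ONLY cleared, a single AF_INET6 socket bound to the
+    // unspecified address can accept IPv4 peers as well, which then appear
+    // as IPv4-mapped addresses of the form ::ffff:192.0.2.1. The return
+    // value of the call below is deliberately ignored, since stacks without
+    // dual-stack support simply reject the option.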
+ DWORD optval = 0; + ::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, + reinterpret_cast<const char*>(&optval), sizeof(optval)); + } + + ec = asio::error_code(); + + return s; +#elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) + socket_type s = error_wrapper(::socket(af, type, protocol), ec); + if (s == invalid_socket) + return s; + + int optval = 1; + int result = error_wrapper(::setsockopt(s, + SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); + if (result != 0) + { + ::close(s); + return invalid_socket; + } + + return s; +#else + int s = error_wrapper(::socket(af, type, protocol), ec); + if (s >= 0) + ec = asio::error_code(); + return s; +#endif +} + +template <typename SockLenType> +inline int call_setsockopt(SockLenType msghdr::*, + socket_type s, int level, int optname, + const void* optval, std::size_t optlen) +{ + return ::setsockopt(s, level, optname, + (const char*)optval, (SockLenType)optlen); +} + +int setsockopt(socket_type s, state_type& state, int level, int optname, + const void* optval, std::size_t optlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + if (level == custom_socket_option_level && optname == always_fail_option) + { + ec = asio::error::invalid_argument; + return socket_error_retval; + } + + if (level == custom_socket_option_level + && optname == enable_connection_aborted_option) + { + if (optlen != sizeof(int)) + { + ec = asio::error::invalid_argument; + return socket_error_retval; + } + + if (*static_cast<const int*>(optval)) + state |= enable_connection_aborted; + else + state &= ~enable_connection_aborted; + ec = asio::error_code(); + return 0; + } + + if (level == SOL_SOCKET && optname == SO_LINGER) + state |= user_set_linger; + +#if defined(__BORLANDC__) + // Mysteriously, using the getsockopt and setsockopt functions directly with + // Borland C++ results in incorrect values being set and read. The bug can be + // worked around by using function addresses resolved with GetProcAddress. + if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) + { + typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int); + if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, "setsockopt")) + { + clear_last_error(); + return error_wrapper(sso(s, level, optname, + reinterpret_cast<const char*>(optval), + static_cast<int>(optlen)), ec); + } + } + ec = asio::error::fault; + return socket_error_retval; +#else // defined(__BORLANDC__) + clear_last_error(); + int result = error_wrapper(call_setsockopt(&msghdr::msg_namelen, + s, level, optname, optval, optlen), ec); + if (result == 0) + { + ec = asio::error_code(); + +#if defined(__MACH__) && defined(__APPLE__) \ + || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) + // To implement portable behaviour for SO_REUSEADDR with UDP sockets we + // need to also set SO_REUSEPORT on BSD-based platforms. 
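+      // For example, binding two UDP sockets to the same multicast port on
+      // macOS requires SO_REUSEPORT on both, whereas Linux needs only
+      // SO_REUSEADDR. Setting the extra option on a best-effort basis (its
+      // result is ignored below) gives both platforms the same observable
+      // behaviour.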
+ if ((state & datagram_oriented) + && level == SOL_SOCKET && optname == SO_REUSEADDR) + { + call_setsockopt(&msghdr::msg_namelen, s, + SOL_SOCKET, SO_REUSEPORT, optval, optlen); + } +#endif + } + + return result; +#endif // defined(__BORLANDC__) +} + +template <typename SockLenType> +inline int call_getsockopt(SockLenType msghdr::*, + socket_type s, int level, int optname, + void* optval, std::size_t* optlen) +{ + SockLenType tmp_optlen = (SockLenType)*optlen; + int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen); + *optlen = (std::size_t)tmp_optlen; + return result; +} + +int getsockopt(socket_type s, state_type state, int level, int optname, + void* optval, size_t* optlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + if (level == custom_socket_option_level && optname == always_fail_option) + { + ec = asio::error::invalid_argument; + return socket_error_retval; + } + + if (level == custom_socket_option_level + && optname == enable_connection_aborted_option) + { + if (*optlen != sizeof(int)) + { + ec = asio::error::invalid_argument; + return socket_error_retval; + } + + *static_cast<int*>(optval) = (state & enable_connection_aborted) ? 1 : 0; + ec = asio::error_code(); + return 0; + } + +#if defined(__BORLANDC__) + // Mysteriously, using the getsockopt and setsockopt functions directly with + // Borland C++ results in incorrect values being set and read. The bug can be + // worked around by using function addresses resolved with GetProcAddress. + if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) + { + typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*); + if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, "getsockopt")) + { + clear_last_error(); + int tmp_optlen = static_cast<int>(*optlen); + int result = error_wrapper(gso(s, level, optname, + reinterpret_cast<char*>(optval), &tmp_optlen), ec); + *optlen = static_cast<size_t>(tmp_optlen); + if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY + && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD)) + { + // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are + // only supported on Windows Vista and later. To simplify program logic + // we will fake success of getting this option and specify that the + // value is non-zero (i.e. true). This corresponds to the behavior of + // IPv6 sockets on Windows platforms pre-Vista. + *static_cast<DWORD*>(optval) = 1; + ec = asio::error_code(); + } + return result; + } + } + ec = asio::error::fault; + return socket_error_retval; +#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) + clear_last_error(); + int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen, + s, level, optname, optval, optlen), ec); + if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY + && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD)) + { + // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only + // supported on Windows Vista and later. To simplify program logic we will + // fake success of getting this option and specify that the value is + // non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets + // on Windows platforms pre-Vista. 
+ *static_cast<DWORD*>(optval) = 1; + ec = asio::error_code(); + } + if (result == 0) + ec = asio::error_code(); + return result; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + clear_last_error(); + int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen, + s, level, optname, optval, optlen), ec); +#if defined(__linux__) + if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int) + && (optname == SO_SNDBUF || optname == SO_RCVBUF)) + { + // On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel + // to set the buffer size to N*2. Linux puts additional stuff into the + // buffers so that only about half is actually available to the application. + // The retrieved value is divided by 2 here to make it appear as though the + // correct value has been set. + *static_cast<int*>(optval) /= 2; + } +#endif // defined(__linux__) + if (result == 0) + ec = asio::error_code(); + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +template <typename SockLenType> +inline int call_getpeername(SockLenType msghdr::*, + socket_type s, socket_addr_type* addr, std::size_t* addrlen) +{ + SockLenType tmp_addrlen = (SockLenType)*addrlen; + int result = ::getpeername(s, addr, &tmp_addrlen); + *addrlen = (std::size_t)tmp_addrlen; + return result; +} + +int getpeername(socket_type s, socket_addr_type* addr, + std::size_t* addrlen, bool cached, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + +#if defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) \ + || defined(__CYGWIN__) + if (cached) + { + // Check if socket is still connected. + DWORD connect_time = 0; + size_t connect_time_len = sizeof(connect_time); + if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME, + &connect_time, &connect_time_len, ec) == socket_error_retval) + { + return socket_error_retval; + } + if (connect_time == 0xFFFFFFFF) + { + ec = asio::error::not_connected; + return socket_error_retval; + } + + // The cached value is still valid. 
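+    // (SO_CONNECT_TIME reports the number of seconds the socket has been
+    // connected, or 0xFFFFFFFF when it is not connected, so it doubles as a
+    // cheap liveness check before the cached endpoint is trusted.)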
+ ec = asio::error_code(); + return 0; + } +#else // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) + // || defined(__CYGWIN__) + (void)cached; +#endif // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) + // || defined(__CYGWIN__) + + clear_last_error(); + int result = error_wrapper(call_getpeername( + &msghdr::msg_namelen, s, addr, addrlen), ec); + if (result == 0) + ec = asio::error_code(); + return result; +} + +template <typename SockLenType> +inline int call_getsockname(SockLenType msghdr::*, + socket_type s, socket_addr_type* addr, std::size_t* addrlen) +{ + SockLenType tmp_addrlen = (SockLenType)*addrlen; + int result = ::getsockname(s, addr, &tmp_addrlen); + *addrlen = (std::size_t)tmp_addrlen; + return result; +} + +int getsockname(socket_type s, socket_addr_type* addr, + std::size_t* addrlen, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + clear_last_error(); + int result = error_wrapper(call_getsockname( + &msghdr::msg_namelen, s, addr, addrlen), ec); + if (result == 0) + ec = asio::error_code(); + return result; +} + +int ioctl(socket_type s, state_type& state, int cmd, + ioctl_arg_type* arg, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + int result = error_wrapper(::ioctlsocket(s, cmd, arg), ec); +#elif defined(__MACH__) && defined(__APPLE__) \ + || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) + int result = error_wrapper(::ioctl(s, + static_cast<unsigned int>(cmd), arg), ec); +#else + int result = error_wrapper(::ioctl(s, cmd, arg), ec); +#endif + if (result >= 0) + { + ec = asio::error_code(); + + // When updating the non-blocking mode we always perform the ioctl syscall, + // even if the flags would otherwise indicate that the socket is already in + // the correct state. This ensures that the underlying socket is put into + // the state that has been requested by the user. If the ioctl syscall was + // successful then we need to update the flags to match. + if (cmd == static_cast<int>(FIONBIO)) + { + if (*arg) + { + state |= user_set_non_blocking; + } + else + { + // Clearing the non-blocking mode always overrides any internally-set + // non-blocking flag. Any subsequent asynchronous operations will need + // to re-enable non-blocking I/O. + state &= ~(user_set_non_blocking | internal_non_blocking); + } + } + } + + return result; +} + +int select(int nfds, fd_set* readfds, fd_set* writefds, + fd_set* exceptfds, timeval* timeout, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + if (!readfds && !writefds && !exceptfds && timeout) + { + DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; + if (milliseconds == 0) + milliseconds = 1; // Force context switch. + ::Sleep(milliseconds); + ec = asio::error_code(); + return 0; + } + + // The select() call allows timeout values measured in microseconds, but the + // system clock (as wrapped by boost::posix_time::microsec_clock) typically + // has a resolution of 10 milliseconds. This can lead to a spinning select + // reactor, meaning increased CPU usage, when waiting for the earliest + // scheduled timeout if it's less than 10 milliseconds away. To avoid a tight + // spin we'll use a minimum timeout of 1 millisecond. 
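+  // For example, a caller asking for a 500 microsecond timeout would get an
+  // effectively zero-length wait from such a clock; the adjustment below
+  // raises any non-zero sub-millisecond timeout to exactly 1000
+  // microseconds.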
+ if (timeout && timeout->tv_sec == 0 + && timeout->tv_usec > 0 && timeout->tv_usec < 1000) + timeout->tv_usec = 1000; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + +#if defined(__hpux) && defined(__SELECT) + timespec ts; + ts.tv_sec = timeout ? timeout->tv_sec : 0; + ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0; + return error_wrapper(::pselect(nfds, readfds, + writefds, exceptfds, timeout ? &ts : 0, 0), ec); +#else + int result = error_wrapper(::select(nfds, readfds, + writefds, exceptfds, timeout), ec); + if (result >= 0) + ec = asio::error_code(); + return result; +#endif +} + +int poll_read(socket_type s, state_type state, + int msec, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + +#if defined(ASIO_WINDOWS) \ + || defined(__CYGWIN__) \ + || defined(__SYMBIAN32__) + fd_set fds; + FD_ZERO(&fds); + FD_SET(s, &fds); + timeval timeout_obj; + timeval* timeout; + if (state & user_set_non_blocking) + { + timeout_obj.tv_sec = 0; + timeout_obj.tv_usec = 0; + timeout = &timeout_obj; + } + else if (msec >= 0) + { + timeout_obj.tv_sec = msec / 1000; + timeout_obj.tv_usec = (msec % 1000) * 1000; + timeout = &timeout_obj; + } + else + timeout = 0; + clear_last_error(); + int result = error_wrapper(::select(s + 1, &fds, 0, 0, timeout), ec); +#else // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + pollfd fds; + fds.fd = s; + fds.events = POLLIN; + fds.revents = 0; + int timeout = (state & user_set_non_blocking) ? 0 : msec; + clear_last_error(); + int result = error_wrapper(::poll(&fds, 1, timeout), ec); +#endif // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + if (result == 0) + ec = (state & user_set_non_blocking) + ? asio::error::would_block : asio::error_code(); + else if (result > 0) + ec = asio::error_code(); + return result; +} + +int poll_write(socket_type s, state_type state, + int msec, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + +#if defined(ASIO_WINDOWS) \ + || defined(__CYGWIN__) \ + || defined(__SYMBIAN32__) + fd_set fds; + FD_ZERO(&fds); + FD_SET(s, &fds); + timeval timeout_obj; + timeval* timeout; + if (state & user_set_non_blocking) + { + timeout_obj.tv_sec = 0; + timeout_obj.tv_usec = 0; + timeout = &timeout_obj; + } + else if (msec >= 0) + { + timeout_obj.tv_sec = msec / 1000; + timeout_obj.tv_usec = (msec % 1000) * 1000; + timeout = &timeout_obj; + } + else + timeout = 0; + clear_last_error(); + int result = error_wrapper(::select(s + 1, 0, &fds, 0, timeout), ec); +#else // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + pollfd fds; + fds.fd = s; + fds.events = POLLOUT; + fds.revents = 0; + int timeout = (state & user_set_non_blocking) ? 0 : msec; + clear_last_error(); + int result = error_wrapper(::poll(&fds, 1, timeout), ec); +#endif // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + if (result == 0) + ec = (state & user_set_non_blocking) + ? 
asio::error::would_block : asio::error_code(); + else if (result > 0) + ec = asio::error_code(); + return result; +} + +int poll_error(socket_type s, state_type state, + int msec, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + +#if defined(ASIO_WINDOWS) \ + || defined(__CYGWIN__) \ + || defined(__SYMBIAN32__) + fd_set fds; + FD_ZERO(&fds); + FD_SET(s, &fds); + timeval timeout_obj; + timeval* timeout; + if (state & user_set_non_blocking) + { + timeout_obj.tv_sec = 0; + timeout_obj.tv_usec = 0; + timeout = &timeout_obj; + } + else if (msec >= 0) + { + timeout_obj.tv_sec = msec / 1000; + timeout_obj.tv_usec = (msec % 1000) * 1000; + timeout = &timeout_obj; + } + else + timeout = 0; + clear_last_error(); + int result = error_wrapper(::select(s + 1, 0, 0, &fds, timeout), ec); +#else // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + pollfd fds; + fds.fd = s; + fds.events = POLLPRI | POLLERR | POLLHUP; + fds.revents = 0; + int timeout = (state & user_set_non_blocking) ? 0 : msec; + clear_last_error(); + int result = error_wrapper(::poll(&fds, 1, timeout), ec); +#endif // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + if (result == 0) + ec = (state & user_set_non_blocking) + ? asio::error::would_block : asio::error_code(); + else if (result > 0) + ec = asio::error_code(); + return result; +} + +int poll_connect(socket_type s, int msec, asio::error_code& ec) +{ + if (s == invalid_socket) + { + ec = asio::error::bad_descriptor; + return socket_error_retval; + } + +#if defined(ASIO_WINDOWS) \ + || defined(__CYGWIN__) \ + || defined(__SYMBIAN32__) + fd_set write_fds; + FD_ZERO(&write_fds); + FD_SET(s, &write_fds); + fd_set except_fds; + FD_ZERO(&except_fds); + FD_SET(s, &except_fds); + timeval timeout_obj; + timeval* timeout; + if (msec >= 0) + { + timeout_obj.tv_sec = msec / 1000; + timeout_obj.tv_usec = (msec % 1000) * 1000; + timeout = &timeout_obj; + } + else + timeout = 0; + clear_last_error(); + int result = error_wrapper(::select( + s + 1, 0, &write_fds, &except_fds, timeout), ec); + if (result >= 0) + ec = asio::error_code(); + return result; +#else // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + pollfd fds; + fds.fd = s; + fds.events = POLLOUT; + fds.revents = 0; + clear_last_error(); + int result = error_wrapper(::poll(&fds, 1, msec), ec); + if (result >= 0) + ec = asio::error_code(); + return result; +#endif // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) +} + +#endif // !defined(ASIO_WINDOWS_RUNTIME) + +const char* inet_ntop(int af, const void* src, char* dest, size_t length, + unsigned long scope_id, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS_RUNTIME) + using namespace std; // For sprintf. + const unsigned char* bytes = static_cast<const unsigned char*>(src); + if (af == ASIO_OS_DEF(AF_INET)) + { + sprintf_s(dest, length, "%u.%u.%u.%u", + bytes[0], bytes[1], bytes[2], bytes[3]); + return dest; + } + else if (af == ASIO_OS_DEF(AF_INET6)) + { + size_t n = 0, b = 0, z = 0; + while (n < length && b < 16) + { + if (bytes[b] == 0 && bytes[b + 1] == 0 && z == 0) + { + do b += 2; while (b < 16 && bytes[b] == 0 && bytes[b + 1] == 0); + n += sprintf_s(dest + n, length - n, ":%s", b < 16 ? "" : ":"), ++z; + } + else + { + n += sprintf_s(dest + n, length - n, "%s%x", b ? 
":" : "", + (static_cast<u_long_type>(bytes[b]) << 8) | bytes[b + 1]); + b += 2; + } + } + if (scope_id) + n += sprintf_s(dest + n, length - n, "%%%lu", scope_id); + return dest; + } + else + { + ec = asio::error::address_family_not_supported; + return 0; + } +#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) + using namespace std; // For memcpy. + + if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) + { + ec = asio::error::address_family_not_supported; + return 0; + } + + union + { + socket_addr_type base; + sockaddr_storage_type storage; + sockaddr_in4_type v4; + sockaddr_in6_type v6; + } address; + DWORD address_length; + if (af == ASIO_OS_DEF(AF_INET)) + { + address_length = sizeof(sockaddr_in4_type); + address.v4.sin_family = ASIO_OS_DEF(AF_INET); + address.v4.sin_port = 0; + memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type)); + } + else // AF_INET6 + { + address_length = sizeof(sockaddr_in6_type); + address.v6.sin6_family = ASIO_OS_DEF(AF_INET6); + address.v6.sin6_port = 0; + address.v6.sin6_flowinfo = 0; + address.v6.sin6_scope_id = scope_id; + memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type)); + } + + DWORD string_length = static_cast<DWORD>(length); +#if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) + LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR)); + int result = error_wrapper(::WSAAddressToStringW(&address.base, + address_length, 0, string_buffer, &string_length), ec); + ::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1, + dest, static_cast<int>(length), 0, 0); +#else + int result = error_wrapper(::WSAAddressToStringA( + &address.base, address_length, 0, dest, &string_length), ec); +#endif + + // Windows may set error code on success. + if (result != socket_error_retval) + ec = asio::error_code(); + + // Windows may not set an error code on failure. + else if (result == socket_error_retval && !ec) + ec = asio::error::invalid_argument; + + return result == socket_error_retval ? 0 : dest; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + const char* result = error_wrapper(::inet_ntop( + af, src, dest, static_cast<int>(length)), ec); + if (result == 0 && !ec) + ec = asio::error::invalid_argument; + if (result != 0 && af == ASIO_OS_DEF(AF_INET6) && scope_id != 0) + { + using namespace std; // For strcat and sprintf. + char if_name[IF_NAMESIZE + 1] = "%"; + const in6_addr_type* ipv6_address = static_cast<const in6_addr_type*>(src); + bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) + && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); + bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) + && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); + if ((!is_link_local && !is_multicast_link_local) + || if_indextoname(static_cast<unsigned>(scope_id), if_name + 1) == 0) + sprintf(if_name + 1, "%lu", scope_id); + strcat(dest, if_name); + } + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +int inet_pton(int af, const char* src, void* dest, + unsigned long* scope_id, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS_RUNTIME) + using namespace std; // For sscanf. 
+ unsigned char* bytes = static_cast<unsigned char*>(dest); + if (af == ASIO_OS_DEF(AF_INET)) + { + unsigned int b0, b1, b2, b3; + if (sscanf_s(src, "%u.%u.%u.%u", &b0, &b1, &b2, &b3) != 4) + { + ec = asio::error::invalid_argument; + return -1; + } + if (b0 > 255 || b1 > 255 || b2 > 255 || b3 > 255) + { + ec = asio::error::invalid_argument; + return -1; + } + bytes[0] = static_cast<unsigned char>(b0); + bytes[1] = static_cast<unsigned char>(b1); + bytes[2] = static_cast<unsigned char>(b2); + bytes[3] = static_cast<unsigned char>(b3); + ec = asio::error_code(); + return 1; + } + else if (af == ASIO_OS_DEF(AF_INET6)) + { + unsigned char* bytes = static_cast<unsigned char*>(dest); + std::memset(bytes, 0, 16); + unsigned char back_bytes[16] = { 0 }; + int num_front_bytes = 0, num_back_bytes = 0; + const char* p = src; + + enum { fword, fcolon, bword, scope, done } state = fword; + unsigned long current_word = 0; + while (state != done) + { + if (current_word > 0xFFFF) + { + ec = asio::error::invalid_argument; + return -1; + } + + switch (state) + { + case fword: + if (*p >= '0' && *p <= '9') + current_word = current_word * 16 + *p++ - '0'; + else if (*p >= 'a' && *p <= 'f') + current_word = current_word * 16 + *p++ - 'a' + 10; + else if (*p >= 'A' && *p <= 'F') + current_word = current_word * 16 + *p++ - 'A' + 10; + else + { + if (num_front_bytes == 16) + { + ec = asio::error::invalid_argument; + return -1; + } + + bytes[num_front_bytes++] = (current_word >> 8) & 0xFF; + bytes[num_front_bytes++] = current_word & 0xFF; + current_word = 0; + + if (*p == ':') + state = fcolon, ++p; + else if (*p == '%') + state = scope, ++p; + else if (*p == 0) + state = done; + else + { + ec = asio::error::invalid_argument; + return -1; + } + } + break; + + case fcolon: + if (*p == ':') + state = bword, ++p; + else + state = fword; + break; + + case bword: + if (*p >= '0' && *p <= '9') + current_word = current_word * 16 + *p++ - '0'; + else if (*p >= 'a' && *p <= 'f') + current_word = current_word * 16 + *p++ - 'a' + 10; + else if (*p >= 'A' && *p <= 'F') + current_word = current_word * 16 + *p++ - 'A' + 10; + else + { + if (num_front_bytes + num_back_bytes == 16) + { + ec = asio::error::invalid_argument; + return -1; + } + + back_bytes[num_back_bytes++] = (current_word >> 8) & 0xFF; + back_bytes[num_back_bytes++] = current_word & 0xFF; + current_word = 0; + + if (*p == ':') + state = bword, ++p; + else if (*p == '%') + state = scope, ++p; + else if (*p == 0) + state = done; + else + { + ec = asio::error::invalid_argument; + return -1; + } + } + break; + + case scope: + if (*p >= '0' && *p <= '9') + current_word = current_word * 10 + *p++ - '0'; + else if (*p == 0) + *scope_id = current_word, state = done; + else + { + ec = asio::error::invalid_argument; + return -1; + } + break; + + default: + break; + } + } + + for (int i = 0; i < num_back_bytes; ++i) + bytes[16 - num_back_bytes + i] = back_bytes[i]; + + ec = asio::error_code(); + return 1; + } + else + { + ec = asio::error::address_family_not_supported; + return -1; + } +#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) + using namespace std; // For memcpy and strcmp. 
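+  // Sketch of this code path: WSAStringToAddress() parses the string into a
+  // complete sockaddr, from which only the raw address bytes (and, for
+  // IPv6, the scope id) are copied back to the caller. Conceptually, with
+  // error handling elided:
+  //
+  //   sockaddr_in6_type sa;
+  //   int salen = sizeof(sa);
+  //   ::WSAStringToAddressA(str, AF_INET6, 0,
+  //       reinterpret_cast<socket_addr_type*>(&sa), &salen);
+  //   memcpy(dest, &sa.sin6_addr, sizeof(in6_addr_type));
+  //   if (scope_id) *scope_id = sa.sin6_scope_id;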
+ + if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) + { + ec = asio::error::address_family_not_supported; + return -1; + } + + union + { + socket_addr_type base; + sockaddr_storage_type storage; + sockaddr_in4_type v4; + sockaddr_in6_type v6; + } address; + int address_length = sizeof(sockaddr_storage_type); +#if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) + int num_wide_chars = static_cast<int>(strlen(src)) + 1; + LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR)); + ::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars); + int result = error_wrapper(::WSAStringToAddressW( + wide_buffer, af, 0, &address.base, &address_length), ec); +#else + int result = error_wrapper(::WSAStringToAddressA( + const_cast<char*>(src), af, 0, &address.base, &address_length), ec); +#endif + + if (af == ASIO_OS_DEF(AF_INET)) + { + if (result != socket_error_retval) + { + memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type)); + ec = asio::error_code(); + } + else if (strcmp(src, "255.255.255.255") == 0) + { + static_cast<in4_addr_type*>(dest)->s_addr = INADDR_NONE; + ec = asio::error_code(); + } + } + else // AF_INET6 + { + if (result != socket_error_retval) + { + memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type)); + if (scope_id) + *scope_id = address.v6.sin6_scope_id; + ec = asio::error_code(); + } + } + + // Windows may not set an error code on failure. + if (result == socket_error_retval && !ec) + ec = asio::error::invalid_argument; + + if (result != socket_error_retval) + ec = asio::error_code(); + + return result == socket_error_retval ? -1 : 1; +#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) + using namespace std; // For strchr, memcpy and atoi. + + // On some platforms, inet_pton fails if an address string contains a scope + // id. Detect and remove the scope id before passing the string to inet_pton. + const bool is_v6 = (af == ASIO_OS_DEF(AF_INET6)); + const char* if_name = is_v6 ? strchr(src, '%') : 0; + char src_buf[max_addr_v6_str_len + 1]; + const char* src_ptr = src; + if (if_name != 0) + { + if (if_name - src > max_addr_v6_str_len) + { + ec = asio::error::invalid_argument; + return 0; + } + memcpy(src_buf, src, if_name - src); + src_buf[if_name - src] = 0; + src_ptr = src_buf; + } + + int result = error_wrapper(::inet_pton(af, src_ptr, dest), ec); + if (result <= 0 && !ec) + ec = asio::error::invalid_argument; + if (result > 0 && is_v6 && scope_id) + { + using namespace std; // For strchr and atoi. 
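+    // E.g. for "fe80::1%eth0" the scope id becomes if_nametoindex("eth0"),
+    // whereas for "fe80::1%3" the numeric suffix is used directly via atoi.
+    // Only link-local and multicast link-local addresses consult the
+    // interface name; other addresses fall through to the numeric form.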
+ *scope_id = 0; + if (if_name != 0) + { + in6_addr_type* ipv6_address = static_cast<in6_addr_type*>(dest); + bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) + && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); + bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) + && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); + if (is_link_local || is_multicast_link_local) + *scope_id = if_nametoindex(if_name + 1); + if (*scope_id == 0) + *scope_id = atoi(if_name + 1); + } + } + return result; +#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) +} + +int gethostname(char* name, int namelen, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS_RUNTIME) + try + { + using namespace Windows::Foundation::Collections; + using namespace Windows::Networking; + using namespace Windows::Networking::Connectivity; + IVectorView<HostName^>^ hostnames = NetworkInformation::GetHostNames(); + for (unsigned i = 0; i < hostnames->Size; ++i) + { + HostName^ hostname = hostnames->GetAt(i); + if (hostname->Type == HostNameType::DomainName) + { + std::wstring_convert<std::codecvt_utf8<wchar_t>> converter; + std::string raw_name = converter.to_bytes(hostname->RawName->Data()); + if (namelen > 0 && raw_name.size() < static_cast<std::size_t>(namelen)) + { + strcpy_s(name, namelen, raw_name.c_str()); + return 0; + } + } + } + return -1; + } + catch (Platform::Exception^ e) + { + ec = asio::error_code(e->HResult, + asio::system_category()); + return -1; + } +#else // defined(ASIO_WINDOWS_RUNTIME) + int result = error_wrapper(::gethostname(name, namelen), ec); +# if defined(ASIO_WINDOWS) + if (result == 0) + ec = asio::error_code(); +# endif // defined(ASIO_WINDOWS) + return result; +#endif // defined(ASIO_WINDOWS_RUNTIME) +} + +#if !defined(ASIO_WINDOWS_RUNTIME) + +#if !defined(ASIO_HAS_GETADDRINFO) + +// The following functions are only needed for emulation of getaddrinfo and +// getnameinfo. 
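+// They are compiled in only when ASIO_HAS_GETADDRINFO is not defined, and
+// wrap the platform-specific resolver variants (reentrant or otherwise)
+// behind one common signature. For example, a lookup through the wrapper
+// below might look like:
+//
+//   hostent hent;
+//   char hbuf[8192] = "";
+//   asio::error_code ec;
+//   hostent* hptr = gethostbyname("example.com",
+//       ASIO_OS_DEF(AF_INET), &hent, hbuf, sizeof(hbuf), 0, ec);
+//   // ... use hptr->h_addr_list ... then:
+//   freehostent(hptr);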
+ +inline asio::error_code translate_netdb_error(int error) +{ + switch (error) + { + case 0: + return asio::error_code(); + case HOST_NOT_FOUND: + return asio::error::host_not_found; + case TRY_AGAIN: + return asio::error::host_not_found_try_again; + case NO_RECOVERY: + return asio::error::no_recovery; + case NO_DATA: + return asio::error::no_data; + default: + ASIO_ASSERT(false); + return asio::error::invalid_argument; + } +} + +inline hostent* gethostbyaddr(const char* addr, int length, int af, + hostent* result, char* buffer, int buflength, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + (void)(buffer); + (void)(buflength); + hostent* retval = error_wrapper(::gethostbyaddr(addr, length, af), ec); + if (!retval) + return 0; + ec = asio::error_code(); + *result = *retval; + return retval; +#elif defined(__sun) || defined(__QNX__) + int error = 0; + hostent* retval = error_wrapper(::gethostbyaddr_r(addr, length, af, result, + buffer, buflength, &error), ec); + if (error) + ec = translate_netdb_error(error); + return retval; +#elif defined(__MACH__) && defined(__APPLE__) + (void)(buffer); + (void)(buflength); + int error = 0; + hostent* retval = error_wrapper(::getipnodebyaddr( + addr, length, af, &error), ec); + if (error) + ec = translate_netdb_error(error); + if (!retval) + return 0; + *result = *retval; + return retval; +#else + hostent* retval = 0; + int error = 0; + error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, + buflength, &retval, &error), ec); + if (error) + ec = translate_netdb_error(error); + return retval; +#endif +} + +inline hostent* gethostbyname(const char* name, int af, struct hostent* result, + char* buffer, int buflength, int ai_flags, asio::error_code& ec) +{ + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + (void)(buffer); + (void)(buflength); + (void)(ai_flags); + if (af != ASIO_OS_DEF(AF_INET)) + { + ec = asio::error::address_family_not_supported; + return 0; + } + hostent* retval = error_wrapper(::gethostbyname(name), ec); + if (!retval) + return 0; + ec = asio::error_code(); + *result = *retval; + return result; +#elif defined(__sun) || defined(__QNX__) + (void)(ai_flags); + if (af != ASIO_OS_DEF(AF_INET)) + { + ec = asio::error::address_family_not_supported; + return 0; + } + int error = 0; + hostent* retval = error_wrapper(::gethostbyname_r(name, result, buffer, + buflength, &error), ec); + if (error) + ec = translate_netdb_error(error); + return retval; +#elif defined(__MACH__) && defined(__APPLE__) + (void)(buffer); + (void)(buflength); + int error = 0; + hostent* retval = error_wrapper(::getipnodebyname( + name, af, ai_flags, &error), ec); + if (error) + ec = translate_netdb_error(error); + if (!retval) + return 0; + *result = *retval; + return retval; +#else + (void)(ai_flags); + if (af != ASIO_OS_DEF(AF_INET)) + { + ec = asio::error::address_family_not_supported; + return 0; + } + hostent* retval = 0; + int error = 0; + error_wrapper(::gethostbyname_r(name, result, + buffer, buflength, &retval, &error), ec); + if (error) + ec = translate_netdb_error(error); + return retval; +#endif +} + +inline void freehostent(hostent* h) +{ +#if defined(__MACH__) && defined(__APPLE__) + if (h) + ::freehostent(h); +#else + (void)(h); +#endif +} + +// Emulation of getaddrinfo based on implementation in: +// Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998. 
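+//
+// The emulation is staged: gai_nsearch() chooses which host strings and
+// address families to try, gai_aistruct() appends one addrinfo node per
+// resolved address, gai_serv() and gai_port() fill in port numbers, and
+// getaddrinfo_emulation() drives the whole sequence.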
+ +struct gai_search +{ + const char* host; + int family; +}; + +inline int gai_nsearch(const char* host, + const addrinfo_type* hints, gai_search (&search)[2]) +{ + int search_count = 0; + if (host == 0 || host[0] == '\0') + { + if (hints->ai_flags & AI_PASSIVE) + { + // No host and AI_PASSIVE implies wildcard bind. + switch (hints->ai_family) + { + case ASIO_OS_DEF(AF_INET): + search[search_count].host = "0.0.0.0"; + search[search_count].family = ASIO_OS_DEF(AF_INET); + ++search_count; + break; + case ASIO_OS_DEF(AF_INET6): + search[search_count].host = "0::0"; + search[search_count].family = ASIO_OS_DEF(AF_INET6); + ++search_count; + break; + case ASIO_OS_DEF(AF_UNSPEC): + search[search_count].host = "0::0"; + search[search_count].family = ASIO_OS_DEF(AF_INET6); + ++search_count; + search[search_count].host = "0.0.0.0"; + search[search_count].family = ASIO_OS_DEF(AF_INET); + ++search_count; + break; + default: + break; + } + } + else + { + // No host and not AI_PASSIVE means connect to local host. + switch (hints->ai_family) + { + case ASIO_OS_DEF(AF_INET): + search[search_count].host = "localhost"; + search[search_count].family = ASIO_OS_DEF(AF_INET); + ++search_count; + break; + case ASIO_OS_DEF(AF_INET6): + search[search_count].host = "localhost"; + search[search_count].family = ASIO_OS_DEF(AF_INET6); + ++search_count; + break; + case ASIO_OS_DEF(AF_UNSPEC): + search[search_count].host = "localhost"; + search[search_count].family = ASIO_OS_DEF(AF_INET6); + ++search_count; + search[search_count].host = "localhost"; + search[search_count].family = ASIO_OS_DEF(AF_INET); + ++search_count; + break; + default: + break; + } + } + } + else + { + // Host is specified. + switch (hints->ai_family) + { + case ASIO_OS_DEF(AF_INET): + search[search_count].host = host; + search[search_count].family = ASIO_OS_DEF(AF_INET); + ++search_count; + break; + case ASIO_OS_DEF(AF_INET6): + search[search_count].host = host; + search[search_count].family = ASIO_OS_DEF(AF_INET6); + ++search_count; + break; + case ASIO_OS_DEF(AF_UNSPEC): + search[search_count].host = host; + search[search_count].family = ASIO_OS_DEF(AF_INET6); + ++search_count; + search[search_count].host = host; + search[search_count].family = ASIO_OS_DEF(AF_INET); + ++search_count; + break; + default: + break; + } + } + return search_count; +} + +template <typename T> +inline T* gai_alloc(std::size_t size = sizeof(T)) +{ + using namespace std; + T* p = static_cast<T*>(::operator new(size, std::nothrow)); + if (p) + memset(p, 0, size); + return p; +} + +inline void gai_free(void* p) +{ + ::operator delete(p); +} + +inline void gai_strcpy(char* target, const char* source, std::size_t max_size) +{ + using namespace std; +#if defined(ASIO_HAS_SECURE_RTL) + strcpy_s(target, max_size, source); +#else // defined(ASIO_HAS_SECURE_RTL) + *target = 0; + if (max_size > 0) + strncat(target, source, max_size - 1); +#endif // defined(ASIO_HAS_SECURE_RTL) +} + +enum { gai_clone_flag = 1 << 30 }; + +inline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints, + const void* addr, int family) +{ + using namespace std; + + addrinfo_type* ai = gai_alloc<addrinfo_type>(); + if (ai == 0) + return EAI_MEMORY; + + ai->ai_next = 0; + **next = ai; + *next = &ai->ai_next; + + ai->ai_canonname = 0; + ai->ai_socktype = hints->ai_socktype; + if (ai->ai_socktype == 0) + ai->ai_flags |= gai_clone_flag; + ai->ai_protocol = hints->ai_protocol; + ai->ai_family = family; + + switch (ai->ai_family) + { + case ASIO_OS_DEF(AF_INET): + { + sockaddr_in4_type* sinptr = 
gai_alloc<sockaddr_in4_type>(); + if (sinptr == 0) + return EAI_MEMORY; + sinptr->sin_family = ASIO_OS_DEF(AF_INET); + memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type)); + ai->ai_addr = reinterpret_cast<sockaddr*>(sinptr); + ai->ai_addrlen = sizeof(sockaddr_in4_type); + break; + } + case ASIO_OS_DEF(AF_INET6): + { + sockaddr_in6_type* sin6ptr = gai_alloc<sockaddr_in6_type>(); + if (sin6ptr == 0) + return EAI_MEMORY; + sin6ptr->sin6_family = ASIO_OS_DEF(AF_INET6); + memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type)); + ai->ai_addr = reinterpret_cast<sockaddr*>(sin6ptr); + ai->ai_addrlen = sizeof(sockaddr_in6_type); + break; + } + default: + break; + } + + return 0; +} + +inline addrinfo_type* gai_clone(addrinfo_type* ai) +{ + using namespace std; + + addrinfo_type* new_ai = gai_alloc<addrinfo_type>(); + if (new_ai == 0) + return new_ai; + + new_ai->ai_next = ai->ai_next; + ai->ai_next = new_ai; + + new_ai->ai_flags = 0; + new_ai->ai_family = ai->ai_family; + new_ai->ai_socktype = ai->ai_socktype; + new_ai->ai_protocol = ai->ai_protocol; + new_ai->ai_canonname = 0; + new_ai->ai_addrlen = ai->ai_addrlen; + new_ai->ai_addr = gai_alloc<sockaddr>(ai->ai_addrlen); + memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen); + + return new_ai; +} + +inline int gai_port(addrinfo_type* aihead, int port, int socktype) +{ + int num_found = 0; + + for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next) + { + if (ai->ai_flags & gai_clone_flag) + { + if (ai->ai_socktype != 0) + { + ai = gai_clone(ai); + if (ai == 0) + return -1; + // ai now points to newly cloned entry. + } + } + else if (ai->ai_socktype != socktype) + { + // Ignore if mismatch on socket type. + continue; + } + + ai->ai_socktype = socktype; + + switch (ai->ai_family) + { + case ASIO_OS_DEF(AF_INET): + { + sockaddr_in4_type* sinptr = + reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr); + sinptr->sin_port = port; + ++num_found; + break; + } + case ASIO_OS_DEF(AF_INET6): + { + sockaddr_in6_type* sin6ptr = + reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr); + sin6ptr->sin6_port = port; + ++num_found; + break; + } + default: + break; + } + } + + return num_found; +} + +inline int gai_serv(addrinfo_type* aihead, + const addrinfo_type* hints, const char* serv) +{ + using namespace std; + + int num_found = 0; + + if ( +#if defined(AI_NUMERICSERV) + (hints->ai_flags & AI_NUMERICSERV) || +#endif + isdigit(static_cast<unsigned char>(serv[0]))) + { + int port = htons(atoi(serv)); + if (hints->ai_socktype) + { + // Caller specifies socket type. + int rc = gai_port(aihead, port, hints->ai_socktype); + if (rc < 0) + return EAI_MEMORY; + num_found += rc; + } + else + { + // Caller does not specify socket type. + int rc = gai_port(aihead, port, SOCK_STREAM); + if (rc < 0) + return EAI_MEMORY; + num_found += rc; + rc = gai_port(aihead, port, SOCK_DGRAM); + if (rc < 0) + return EAI_MEMORY; + num_found += rc; + } + } + else + { + // Try service name with TCP first, then UDP. 
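+    // E.g. for serv == "http" the "tcp" lookup typically yields port 80 in
+    // network byte order, while a UDP-only service such as "tftp" is only
+    // found by the second, "udp", lookup.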
+ if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM) + { + servent* sptr = getservbyname(serv, "tcp"); + if (sptr != 0) + { + int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM); + if (rc < 0) + return EAI_MEMORY; + num_found += rc; + } + } + if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM) + { + servent* sptr = getservbyname(serv, "udp"); + if (sptr != 0) + { + int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM); + if (rc < 0) + return EAI_MEMORY; + num_found += rc; + } + } + } + + if (num_found == 0) + { + if (hints->ai_socktype == 0) + { + // All calls to getservbyname() failed. + return EAI_NONAME; + } + else + { + // Service not supported for socket type. + return EAI_SERVICE; + } + } + + return 0; +} + +inline int gai_echeck(const char* host, const char* service, + int flags, int family, int socktype, int protocol) +{ + (void)(flags); + (void)(protocol); + + // Host or service must be specified. + if (host == 0 || host[0] == '\0') + if (service == 0 || service[0] == '\0') + return EAI_NONAME; + + // Check combination of family and socket type. + switch (family) + { + case ASIO_OS_DEF(AF_UNSPEC): + break; + case ASIO_OS_DEF(AF_INET): + case ASIO_OS_DEF(AF_INET6): + if (service != 0 && service[0] != '\0') + if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM) + return EAI_SOCKTYPE; + break; + default: + return EAI_FAMILY; + } + + return 0; +} + +inline void freeaddrinfo_emulation(addrinfo_type* aihead) +{ + addrinfo_type* ai = aihead; + while (ai) + { + gai_free(ai->ai_addr); + gai_free(ai->ai_canonname); + addrinfo_type* ainext = ai->ai_next; + gai_free(ai); + ai = ainext; + } +} + +inline int getaddrinfo_emulation(const char* host, const char* service, + const addrinfo_type* hintsp, addrinfo_type** result) +{ + // Set up linked list of addrinfo structures. + addrinfo_type* aihead = 0; + addrinfo_type** ainext = &aihead; + char* canon = 0; + + // Supply default hints if not specified by caller. + addrinfo_type hints = addrinfo_type(); + hints.ai_family = ASIO_OS_DEF(AF_UNSPEC); + if (hintsp) + hints = *hintsp; + + // If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED + // and AI_ALL flags. +#if defined(AI_V4MAPPED) + if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) + hints.ai_flags &= ~AI_V4MAPPED; +#endif +#if defined(AI_ALL) + if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) + hints.ai_flags &= ~AI_ALL; +#endif + + // Basic error checking. + int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family, + hints.ai_socktype, hints.ai_protocol); + if (rc != 0) + { + freeaddrinfo_emulation(aihead); + return rc; + } + + gai_search search[2]; + int search_count = gai_nsearch(host, &hints, search); + for (gai_search* sptr = search; sptr < search + search_count; ++sptr) + { + // Check for IPv4 dotted decimal string. + in4_addr_type inaddr; + asio::error_code ec; + if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), + sptr->host, &inaddr, 0, ec) == 1) + { + if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) + && hints.ai_family != ASIO_OS_DEF(AF_INET)) + { + freeaddrinfo_emulation(aihead); + gai_free(canon); + return EAI_FAMILY; + } + if (sptr->family == ASIO_OS_DEF(AF_INET)) + { + rc = gai_aistruct(&ainext, &hints, &inaddr, ASIO_OS_DEF(AF_INET)); + if (rc != 0) + { + freeaddrinfo_emulation(aihead); + gai_free(canon); + return rc; + } + } + continue; + } + + // Check for IPv6 hex string. 
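+    // This mirrors the IPv4 literal check above: a host that parses as an
+    // IPv6 literal (e.g. "::1") yields a synthesised addrinfo entry and the
+    // hostname lookup below is skipped.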
+ in6_addr_type in6addr; + if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), + sptr->host, &in6addr, 0, ec) == 1) + { + if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) + && hints.ai_family != ASIO_OS_DEF(AF_INET6)) + { + freeaddrinfo_emulation(aihead); + gai_free(canon); + return EAI_FAMILY; + } + if (sptr->family == ASIO_OS_DEF(AF_INET6)) + { + rc = gai_aistruct(&ainext, &hints, &in6addr, + ASIO_OS_DEF(AF_INET6)); + if (rc != 0) + { + freeaddrinfo_emulation(aihead); + gai_free(canon); + return rc; + } + } + continue; + } + + // Look up hostname. + hostent hent; + char hbuf[8192] = ""; + hostent* hptr = socket_ops::gethostbyname(sptr->host, + sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec); + if (hptr == 0) + { + if (search_count == 2) + { + // Failure is OK if there are multiple searches. + continue; + } + freeaddrinfo_emulation(aihead); + gai_free(canon); + if (ec == asio::error::host_not_found) + return EAI_NONAME; + if (ec == asio::error::host_not_found_try_again) + return EAI_AGAIN; + if (ec == asio::error::no_recovery) + return EAI_FAIL; + if (ec == asio::error::no_data) + return EAI_NONAME; + return EAI_NONAME; + } + + // Check for address family mismatch if one was specified. + if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) + && hints.ai_family != hptr->h_addrtype) + { + freeaddrinfo_emulation(aihead); + gai_free(canon); + socket_ops::freehostent(hptr); + return EAI_FAMILY; + } + + // Save canonical name first time. + if (host != 0 && host[0] != '\0' && hptr->h_name && hptr->h_name[0] + && (hints.ai_flags & AI_CANONNAME) && canon == 0) + { + std::size_t canon_len = strlen(hptr->h_name) + 1; + canon = gai_alloc<char>(canon_len); + if (canon == 0) + { + freeaddrinfo_emulation(aihead); + socket_ops::freehostent(hptr); + return EAI_MEMORY; + } + gai_strcpy(canon, hptr->h_name, canon_len); + } + + // Create an addrinfo structure for each returned address. + for (char** ap = hptr->h_addr_list; *ap; ++ap) + { + rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype); + if (rc != 0) + { + freeaddrinfo_emulation(aihead); + gai_free(canon); + socket_ops::freehostent(hptr); + return EAI_FAMILY; + } + } + + socket_ops::freehostent(hptr); + } + + // Check if we found anything. + if (aihead == 0) + { + gai_free(canon); + return EAI_NONAME; + } + + // Return canonical name in first entry. + if (host != 0 && host[0] != '\0' && (hints.ai_flags & AI_CANONNAME)) + { + if (canon) + { + aihead->ai_canonname = canon; + canon = 0; + } + else + { + std::size_t canonname_len = strlen(search[0].host) + 1; + aihead->ai_canonname = gai_alloc<char>(canonname_len); + if (aihead->ai_canonname == 0) + { + freeaddrinfo_emulation(aihead); + return EAI_MEMORY; + } + gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len); + } + } + gai_free(canon); + + // Process the service name. + if (service != 0 && service[0] != '\0') + { + rc = gai_serv(aihead, &hints, service); + if (rc != 0) + { + freeaddrinfo_emulation(aihead); + return rc; + } + } + + // Return result to caller. 
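+  // Ownership of the addrinfo chain passes to the caller, which is expected
+  // to release it with freeaddrinfo_emulation(). A minimal (hypothetical)
+  // round trip:
+  //
+  //   addrinfo_type* res = 0;
+  //   int rc = getaddrinfo_emulation("localhost", "80", 0, &res);
+  //   if (rc == 0)
+  //   {
+  //     // ... iterate the res->ai_next chain ...
+  //     freeaddrinfo_emulation(res);
+  //   }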
+ *result = aihead; + return 0; +} + +inline asio::error_code getnameinfo_emulation( + const socket_addr_type* sa, std::size_t salen, char* host, + std::size_t hostlen, char* serv, std::size_t servlen, int flags, + asio::error_code& ec) +{ + using namespace std; + + const char* addr; + size_t addr_len; + unsigned short port; + switch (sa->sa_family) + { + case ASIO_OS_DEF(AF_INET): + if (salen != sizeof(sockaddr_in4_type)) + { + return ec = asio::error::invalid_argument; + } + addr = reinterpret_cast<const char*>( + &reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_addr); + addr_len = sizeof(in4_addr_type); + port = reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_port; + break; + case ASIO_OS_DEF(AF_INET6): + if (salen != sizeof(sockaddr_in6_type)) + { + return ec = asio::error::invalid_argument; + } + addr = reinterpret_cast<const char*>( + &reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_addr); + addr_len = sizeof(in6_addr_type); + port = reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_port; + break; + default: + return ec = asio::error::address_family_not_supported; + } + + if (host && hostlen > 0) + { + if (flags & NI_NUMERICHOST) + { + if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0) + { + return ec; + } + } + else + { + hostent hent; + char hbuf[8192] = ""; + hostent* hptr = socket_ops::gethostbyaddr(addr, + static_cast<int>(addr_len), sa->sa_family, + &hent, hbuf, sizeof(hbuf), ec); + if (hptr && hptr->h_name && hptr->h_name[0] != '\0') + { + if (flags & NI_NOFQDN) + { + char* dot = strchr(hptr->h_name, '.'); + if (dot) + { + *dot = 0; + } + } + gai_strcpy(host, hptr->h_name, hostlen); + socket_ops::freehostent(hptr); + } + else + { + socket_ops::freehostent(hptr); + if (flags & NI_NAMEREQD) + { + return ec = asio::error::host_not_found; + } + if (socket_ops::inet_ntop(sa->sa_family, + addr, host, hostlen, 0, ec) == 0) + { + return ec; + } + } + } + } + + if (serv && servlen > 0) + { + if (flags & NI_NUMERICSERV) + { + if (servlen < 6) + { + return ec = asio::error::no_buffer_space; + } +#if defined(ASIO_HAS_SECURE_RTL) + sprintf_s(serv, servlen, "%u", ntohs(port)); +#else // defined(ASIO_HAS_SECURE_RTL) + sprintf(serv, "%u", ntohs(port)); +#endif // defined(ASIO_HAS_SECURE_RTL) + } + else + { +#if defined(ASIO_HAS_PTHREADS) + static ::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + ::pthread_mutex_lock(&mutex); +#endif // defined(ASIO_HAS_PTHREADS) + servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? 
"udp" : 0); + if (sptr && sptr->s_name && sptr->s_name[0] != '\0') + { + gai_strcpy(serv, sptr->s_name, servlen); + } + else + { + if (servlen < 6) + { + return ec = asio::error::no_buffer_space; + } +#if defined(ASIO_HAS_SECURE_RTL) + sprintf_s(serv, servlen, "%u", ntohs(port)); +#else // defined(ASIO_HAS_SECURE_RTL) + sprintf(serv, "%u", ntohs(port)); +#endif // defined(ASIO_HAS_SECURE_RTL) + } +#if defined(ASIO_HAS_PTHREADS) + ::pthread_mutex_unlock(&mutex); +#endif // defined(ASIO_HAS_PTHREADS) + } + } + + ec = asio::error_code(); + return ec; +} + +#endif // !defined(ASIO_HAS_GETADDRINFO) + +inline asio::error_code translate_addrinfo_error(int error) +{ + switch (error) + { + case 0: + return asio::error_code(); + case EAI_AGAIN: + return asio::error::host_not_found_try_again; + case EAI_BADFLAGS: + return asio::error::invalid_argument; + case EAI_FAIL: + return asio::error::no_recovery; + case EAI_FAMILY: + return asio::error::address_family_not_supported; + case EAI_MEMORY: + return asio::error::no_memory; + case EAI_NONAME: +#if defined(EAI_ADDRFAMILY) + case EAI_ADDRFAMILY: +#endif +#if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME) + case EAI_NODATA: +#endif + return asio::error::host_not_found; + case EAI_SERVICE: + return asio::error::service_not_found; + case EAI_SOCKTYPE: + return asio::error::socket_type_not_supported; + default: // Possibly the non-portable EAI_SYSTEM. +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) + return asio::error_code( + WSAGetLastError(), asio::error::get_system_category()); +#else + return asio::error_code( + errno, asio::error::get_system_category()); +#endif + } +} + +asio::error_code getaddrinfo(const char* host, + const char* service, const addrinfo_type& hints, + addrinfo_type** result, asio::error_code& ec) +{ + host = (host && *host) ? host : 0; + service = (service && *service) ? service : 0; + clear_last_error(); +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) +# if defined(ASIO_HAS_GETADDRINFO) + // Building for Windows XP, Windows Server 2003, or later. + int error = ::getaddrinfo(host, service, &hints, result); + return ec = translate_addrinfo_error(error); +# else + // Building for Windows 2000 or earlier. + typedef int (WSAAPI *gai_t)(const char*, + const char*, const addrinfo_type*, addrinfo_type**); + if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) + { + if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, "getaddrinfo")) + { + int error = gai(host, service, &hints, result); + return ec = translate_addrinfo_error(error); + } + } + int error = getaddrinfo_emulation(host, service, &hints, result); + return ec = translate_addrinfo_error(error); +# endif +#elif !defined(ASIO_HAS_GETADDRINFO) + int error = getaddrinfo_emulation(host, service, &hints, result); + return ec = translate_addrinfo_error(error); +#else + int error = ::getaddrinfo(host, service, &hints, result); +#if defined(__MACH__) && defined(__APPLE__) + using namespace std; // For isdigit and atoi. 
+ if (error == 0 && service && isdigit(static_cast<unsigned char>(service[0]))) + { + u_short_type port = host_to_network_short(atoi(service)); + for (addrinfo_type* ai = *result; ai; ai = ai->ai_next) + { + switch (ai->ai_family) + { + case ASIO_OS_DEF(AF_INET): + { + sockaddr_in4_type* sinptr = + reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr); + if (sinptr->sin_port == 0) + sinptr->sin_port = port; + break; + } + case ASIO_OS_DEF(AF_INET6): + { + sockaddr_in6_type* sin6ptr = + reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr); + if (sin6ptr->sin6_port == 0) + sin6ptr->sin6_port = port; + break; + } + default: + break; + } + } + } +#endif + return ec = translate_addrinfo_error(error); +#endif +} + +asio::error_code background_getaddrinfo( + const weak_cancel_token_type& cancel_token, const char* host, + const char* service, const addrinfo_type& hints, + addrinfo_type** result, asio::error_code& ec) +{ + if (cancel_token.expired()) + ec = asio::error::operation_aborted; + else + socket_ops::getaddrinfo(host, service, hints, result, ec); + return ec; +} + +void freeaddrinfo(addrinfo_type* ai) +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) +# if defined(ASIO_HAS_GETADDRINFO) + // Building for Windows XP, Windows Server 2003, or later. + ::freeaddrinfo(ai); +# else + // Building for Windows 2000 or earlier. + typedef int (WSAAPI *fai_t)(addrinfo_type*); + if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) + { + if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, "freeaddrinfo")) + { + fai(ai); + return; + } + } + freeaddrinfo_emulation(ai); +# endif +#elif !defined(ASIO_HAS_GETADDRINFO) + freeaddrinfo_emulation(ai); +#else + ::freeaddrinfo(ai); +#endif +} + +asio::error_code getnameinfo(const socket_addr_type* addr, + std::size_t addrlen, char* host, std::size_t hostlen, + char* serv, std::size_t servlen, int flags, asio::error_code& ec) +{ +#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) +# if defined(ASIO_HAS_GETADDRINFO) + // Building for Windows XP, Windows Server 2003, or later. + clear_last_error(); + int error = ::getnameinfo(addr, static_cast<socklen_t>(addrlen), + host, static_cast<DWORD>(hostlen), + serv, static_cast<DWORD>(servlen), flags); + return ec = translate_addrinfo_error(error); +# else + // Building for Windows 2000 or earlier. + typedef int (WSAAPI *gni_t)(const socket_addr_type*, + int, char*, DWORD, char*, DWORD, int); + if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) + { + if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, "getnameinfo")) + { + clear_last_error(); + int error = gni(addr, static_cast<int>(addrlen), + host, static_cast<DWORD>(hostlen), + serv, static_cast<DWORD>(servlen), flags); + return ec = translate_addrinfo_error(error); + } + } + clear_last_error(); + return getnameinfo_emulation(addr, addrlen, + host, hostlen, serv, servlen, flags, ec); +# endif +#elif !defined(ASIO_HAS_GETADDRINFO) + using namespace std; // For memcpy. 
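+  // A sketch of this path for an IPv4 endpoint (buffer sizes are
+  // illustrative only):
+  //
+  //   sockaddr_in4_type sa;   // family, address and port filled in
+  //   char host[1025], serv[32];
+  //   getnameinfo_emulation(
+  //       reinterpret_cast<const socket_addr_type*>(&sa), sizeof(sa),
+  //       host, sizeof(host), serv, sizeof(serv), 0, ec);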
+ sockaddr_storage_type tmp_addr; + memcpy(&tmp_addr, addr, addrlen); + addr = reinterpret_cast<socket_addr_type*>(&tmp_addr); + clear_last_error(); + return getnameinfo_emulation(addr, addrlen, + host, hostlen, serv, servlen, flags, ec); +#else + clear_last_error(); + int error = ::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags); + return ec = translate_addrinfo_error(error); +#endif +} + +asio::error_code sync_getnameinfo( + const socket_addr_type* addr, std::size_t addrlen, + char* host, std::size_t hostlen, char* serv, + std::size_t servlen, int sock_type, asio::error_code& ec) +{ + // First try resolving with the service name. If that fails try resolving + // but allow the service to be returned as a number. + int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0; + socket_ops::getnameinfo(addr, addrlen, host, + hostlen, serv, servlen, flags, ec); + if (ec) + { + socket_ops::getnameinfo(addr, addrlen, host, hostlen, + serv, servlen, flags | NI_NUMERICSERV, ec); + } + + return ec; +} + +asio::error_code background_getnameinfo( + const weak_cancel_token_type& cancel_token, + const socket_addr_type* addr, std::size_t addrlen, + char* host, std::size_t hostlen, char* serv, + std::size_t servlen, int sock_type, asio::error_code& ec) +{ + if (cancel_token.expired()) + { + ec = asio::error::operation_aborted; + } + else + { + // First try resolving with the service name. If that fails try resolving + // but allow the service to be returned as a number. + int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0; + socket_ops::getnameinfo(addr, addrlen, host, + hostlen, serv, servlen, flags, ec); + if (ec) + { + socket_ops::getnameinfo(addr, addrlen, host, hostlen, + serv, servlen, flags | NI_NUMERICSERV, ec); + } + } + + return ec; +} + +#endif // !defined(ASIO_WINDOWS_RUNTIME) + +u_long_type network_to_host_long(u_long_type value) +{ +#if defined(ASIO_WINDOWS_RUNTIME) + unsigned char* value_p = reinterpret_cast<unsigned char*>(&value); + u_long_type result = (static_cast<u_long_type>(value_p[0]) << 24) + | (static_cast<u_long_type>(value_p[1]) << 16) + | (static_cast<u_long_type>(value_p[2]) << 8) + | static_cast<u_long_type>(value_p[3]); + return result; +#else // defined(ASIO_WINDOWS_RUNTIME) + return ntohl(value); +#endif // defined(ASIO_WINDOWS_RUNTIME) +} + +u_long_type host_to_network_long(u_long_type value) +{ +#if defined(ASIO_WINDOWS_RUNTIME) + u_long_type result; + unsigned char* result_p = reinterpret_cast<unsigned char*>(&result); + result_p[0] = static_cast<unsigned char>((value >> 24) & 0xFF); + result_p[1] = static_cast<unsigned char>((value >> 16) & 0xFF); + result_p[2] = static_cast<unsigned char>((value >> 8) & 0xFF); + result_p[3] = static_cast<unsigned char>(value & 0xFF); + return result; +#else // defined(ASIO_WINDOWS_RUNTIME) + return htonl(value); +#endif // defined(ASIO_WINDOWS_RUNTIME) +} + +u_short_type network_to_host_short(u_short_type value) +{ +#if defined(ASIO_WINDOWS_RUNTIME) + unsigned char* value_p = reinterpret_cast<unsigned char*>(&value); + u_short_type result = (static_cast<u_short_type>(value_p[0]) << 8) + | static_cast<u_short_type>(value_p[1]); + return result; +#else // defined(ASIO_WINDOWS_RUNTIME) + return ntohs(value); +#endif // defined(ASIO_WINDOWS_RUNTIME) +} + +u_short_type host_to_network_short(u_short_type value) +{ +#if defined(ASIO_WINDOWS_RUNTIME) + u_short_type result; + unsigned char* result_p = reinterpret_cast<unsigned char*>(&result); + result_p[0] = static_cast<unsigned char>((value >> 8) & 0xFF); + result_p[1] = 
static_cast<unsigned char>(value & 0xFF); + return result; +#else // defined(ASIO_WINDOWS_RUNTIME) + return htons(value); +#endif // defined(ASIO_WINDOWS_RUNTIME) +} + +} // namespace socket_ops +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_SOCKET_OPS_IPP diff --git a/lib/asio/detail/impl/socket_select_interrupter.ipp b/lib/asio/detail/impl/socket_select_interrupter.ipp new file mode 100644 index 0000000..052f2a7 --- /dev/null +++ b/lib/asio/detail/impl/socket_select_interrupter.ipp @@ -0,0 +1,176 @@ +// +// detail/impl/socket_select_interrupter.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP +#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if !defined(ASIO_WINDOWS_RUNTIME) + +#if defined(ASIO_WINDOWS) \ + || defined(__CYGWIN__) \ + || defined(__SYMBIAN32__) + +#include <cstdlib> +#include "asio/detail/socket_holder.hpp" +#include "asio/detail/socket_ops.hpp" +#include "asio/detail/socket_select_interrupter.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +socket_select_interrupter::socket_select_interrupter() +{ + open_descriptors(); +} + +void socket_select_interrupter::open_descriptors() +{ + asio::error_code ec; + socket_holder acceptor(socket_ops::socket( + AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); + if (acceptor.get() == invalid_socket) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + int opt = 1; + socket_ops::state_type acceptor_state = 0; + socket_ops::setsockopt(acceptor.get(), acceptor_state, + SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec); + + using namespace std; // For memset. + sockaddr_in4_type addr; + std::size_t addr_len = sizeof(addr); + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); + addr.sin_port = 0; + if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr, + addr_len, ec) == socket_error_retval) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr, + &addr_len, ec) == socket_error_retval) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + // Some broken firewalls on Windows will intermittently cause getsockname to + // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We + // explicitly specify the target address here to work around this problem. 
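+  // That is, if getsockname() reported the wildcard address, substitute
+  // loopback while keeping the ephemeral port it assigned, so the connect()
+  // below still reaches the acceptor.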
+ if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY)) + addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); + + if (socket_ops::listen(acceptor.get(), + SOMAXCONN, ec) == socket_error_retval) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + socket_holder client(socket_ops::socket( + AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); + if (client.get() == invalid_socket) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr, + addr_len, ec) == socket_error_retval) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec)); + if (server.get() == invalid_socket) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + ioctl_arg_type non_blocking = 1; + socket_ops::state_type client_state = 0; + if (socket_ops::ioctl(client.get(), client_state, + FIONBIO, &non_blocking, ec)) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + opt = 1; + socket_ops::setsockopt(client.get(), client_state, + IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); + + non_blocking = 1; + socket_ops::state_type server_state = 0; + if (socket_ops::ioctl(server.get(), server_state, + FIONBIO, &non_blocking, ec)) + asio::detail::throw_error(ec, "socket_select_interrupter"); + + opt = 1; + socket_ops::setsockopt(server.get(), server_state, + IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); + + read_descriptor_ = server.release(); + write_descriptor_ = client.release(); +} + +socket_select_interrupter::~socket_select_interrupter() +{ + close_descriptors(); +} + +void socket_select_interrupter::close_descriptors() +{ + asio::error_code ec; + socket_ops::state_type state = socket_ops::internal_non_blocking; + if (read_descriptor_ != invalid_socket) + socket_ops::close(read_descriptor_, state, true, ec); + if (write_descriptor_ != invalid_socket) + socket_ops::close(write_descriptor_, state, true, ec); +} + +void socket_select_interrupter::recreate() +{ + close_descriptors(); + + write_descriptor_ = invalid_socket; + read_descriptor_ = invalid_socket; + + open_descriptors(); +} + +void socket_select_interrupter::interrupt() +{ + char byte = 0; + socket_ops::buf b; + socket_ops::init_buf(b, &byte, 1); + asio::error_code ec; + socket_ops::send(write_descriptor_, &b, 1, 0, ec); +} + +bool socket_select_interrupter::reset() +{ + char data[1024]; + socket_ops::buf b; + socket_ops::init_buf(b, data, sizeof(data)); + asio::error_code ec; + int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); + bool was_interrupted = (bytes_read > 0); + while (bytes_read == sizeof(data)) + bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); + return was_interrupted; +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS) + // || defined(__CYGWIN__) + // || defined(__SYMBIAN32__) + +#endif // !defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP diff --git a/lib/asio/detail/impl/strand_executor_service.hpp b/lib/asio/detail/impl/strand_executor_service.hpp new file mode 100644 index 0000000..0e18ca0 --- /dev/null +++ b/lib/asio/detail/impl/strand_executor_service.hpp @@ -0,0 +1,179 @@ +// +// detail/impl/strand_executor_service.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP +#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/call_stack.hpp" +#include "asio/detail/fenced_block.hpp" +#include "asio/detail/handler_invoke_helpers.hpp" +#include "asio/detail/recycling_allocator.hpp" +#include "asio/executor_work_guard.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Executor> +class strand_executor_service::invoker +{ +public: + invoker(const implementation_type& impl, Executor& ex) + : impl_(impl), + work_(ex) + { + } + + invoker(const invoker& other) + : impl_(other.impl_), + work_(other.work_) + { + } + +#if defined(ASIO_HAS_MOVE) + invoker(invoker&& other) + : impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)), + work_(ASIO_MOVE_CAST(executor_work_guard<Executor>)(other.work_)) + { + } +#endif // defined(ASIO_HAS_MOVE) + + struct on_invoker_exit + { + invoker* this_; + + ~on_invoker_exit() + { + this_->impl_->mutex_->lock(); + this_->impl_->ready_queue_.push(this_->impl_->waiting_queue_); + bool more_handlers = this_->impl_->locked_ = + !this_->impl_->ready_queue_.empty(); + this_->impl_->mutex_->unlock(); + + if (more_handlers) + { + Executor ex(this_->work_.get_executor()); + recycling_allocator<void> allocator; + ex.post(ASIO_MOVE_CAST(invoker)(*this_), allocator); + } + } + }; + + void operator()() + { + // Indicate that this strand is executing on the current thread. + call_stack<strand_impl>::context ctx(impl_.get()); + + // Ensure the next handler, if any, is scheduled on block exit. + on_invoker_exit on_exit = { this }; + (void)on_exit; + + // Run all ready handlers. No lock is required since the ready queue is + // accessed only within the strand. + asio::error_code ec; + while (scheduler_operation* o = impl_->ready_queue_.front()) + { + impl_->ready_queue_.pop(); + o->complete(impl_.get(), ec, 0); + } + } + +private: + implementation_type impl_; + executor_work_guard<Executor> work_; +}; + +template <typename Executor, typename Function, typename Allocator> +void strand_executor_service::dispatch(const implementation_type& impl, + Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a) +{ + typedef typename decay<Function>::type function_type; + + // If we are already in the strand then the function can run immediately. + if (call_stack<strand_impl>::contains(impl.get())) + { + // Make a local, non-const copy of the function. + function_type tmp(ASIO_MOVE_CAST(Function)(function)); + + fenced_block b(fenced_block::full); + asio_handler_invoke_helpers::invoke(tmp, tmp); + return; + } + + // Allocate and construct an operation to wrap the function. + typedef executor_op<function_type, Allocator> op; + typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; + p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a); + + ASIO_HANDLER_CREATION((impl->service_->context(), *p.p, + "strand_executor", impl.get(), 0, "dispatch")); + + // Add the function to the strand and schedule the strand if required. 
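+  // enqueue() returns true only for the operation that acquires the strand
+  // "lock"; that caller becomes responsible for running the invoker, which
+  // drains the ready queue and, via on_invoker_exit, reschedules itself
+  // while further handlers are waiting.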
+ bool first = enqueue(impl, p.p); + p.v = p.p = 0; + if (first) + ex.dispatch(invoker<Executor>(impl, ex), a); +} + +// Request invocation of the given function and return immediately. +template <typename Executor, typename Function, typename Allocator> +void strand_executor_service::post(const implementation_type& impl, + Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a) +{ + typedef typename decay<Function>::type function_type; + + // Allocate and construct an operation to wrap the function. + typedef executor_op<function_type, Allocator> op; + typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; + p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a); + + ASIO_HANDLER_CREATION((impl->service_->context(), *p.p, + "strand_executor", impl.get(), 0, "post")); + + // Add the function to the strand and schedule the strand if required. + bool first = enqueue(impl, p.p); + p.v = p.p = 0; + if (first) + ex.post(invoker<Executor>(impl, ex), a); +} + +// Request invocation of the given function and return immediately. +template <typename Executor, typename Function, typename Allocator> +void strand_executor_service::defer(const implementation_type& impl, + Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a) +{ + typedef typename decay<Function>::type function_type; + + // Allocate and construct an operation to wrap the function. + typedef executor_op<function_type, Allocator> op; + typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; + p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a); + + ASIO_HANDLER_CREATION((impl->service_->context(), *p.p, + "strand_executor", impl.get(), 0, "defer")); + + // Add the function to the strand and schedule the strand if required. + bool first = enqueue(impl, p.p); + p.v = p.p = 0; + if (first) + ex.defer(invoker<Executor>(impl, ex), a); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP diff --git a/lib/asio/detail/impl/strand_executor_service.ipp b/lib/asio/detail/impl/strand_executor_service.ipp new file mode 100644 index 0000000..365652e --- /dev/null +++ b/lib/asio/detail/impl/strand_executor_service.ipp @@ -0,0 +1,134 @@ +// +// detail/impl/strand_executor_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP +#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" +#include "asio/detail/strand_executor_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +strand_executor_service::strand_executor_service(execution_context& ctx) + : execution_context_service_base<strand_executor_service>(ctx), + mutex_(), + salt_(0), + impl_list_(0) +{ +} + +void strand_executor_service::shutdown() +{ + op_queue<scheduler_operation> ops; + + asio::detail::mutex::scoped_lock lock(mutex_); + + strand_impl* impl = impl_list_; + while (impl) + { + impl->mutex_->lock(); + impl->shutdown_ = true; + ops.push(impl->waiting_queue_); + ops.push(impl->ready_queue_); + impl->mutex_->unlock(); + impl = impl->next_; + } +} + +strand_executor_service::implementation_type +strand_executor_service::create_implementation() +{ + implementation_type new_impl(new strand_impl); + new_impl->locked_ = false; + new_impl->shutdown_ = false; + + asio::detail::mutex::scoped_lock lock(mutex_); + + // Select a mutex from the pool of shared mutexes. + std::size_t salt = salt_++; + std::size_t mutex_index = reinterpret_cast<std::size_t>(new_impl.get()); + mutex_index += (reinterpret_cast<std::size_t>(new_impl.get()) >> 3); + mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2); + mutex_index = mutex_index % num_mutexes; + if (!mutexes_[mutex_index].get()) + mutexes_[mutex_index].reset(new mutex); + new_impl->mutex_ = mutexes_[mutex_index].get(); + + // Insert implementation into linked list of all implementations. + new_impl->next_ = impl_list_; + new_impl->prev_ = 0; + if (impl_list_) + impl_list_->prev_ = new_impl.get(); + impl_list_ = new_impl.get(); + new_impl->service_ = this; + + return new_impl; +} + +strand_executor_service::strand_impl::~strand_impl() +{ + asio::detail::mutex::scoped_lock lock(service_->mutex_); + + // Remove implementation from linked list of all implementations. + if (service_->impl_list_ == this) + service_->impl_list_ = next_; + if (prev_) + prev_->next_ = next_; + if (next_) + next_->prev_= prev_; +} + +bool strand_executor_service::enqueue(const implementation_type& impl, + scheduler_operation* op) +{ + impl->mutex_->lock(); + if (impl->shutdown_) + { + impl->mutex_->unlock(); + op->destroy(); + return false; + } + else if (impl->locked_) + { + // Some other function already holds the strand lock. Enqueue for later. + impl->waiting_queue_.push(op); + impl->mutex_->unlock(); + return false; + } + else + { + // The function is acquiring the strand lock and so is responsible for + // scheduling the strand. 
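+    // Note that the push happens outside the mutex: the ready queue is only
+    // ever touched by the thread currently holding the strand "lock" flag,
+    // so no further synchronisation is required for it.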
+ impl->locked_ = true; + impl->mutex_->unlock(); + impl->ready_queue_.push(op); + return true; + } +} + +bool strand_executor_service::running_in_this_thread( + const implementation_type& impl) +{ + return !!call_stack<strand_impl>::contains(impl.get()); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP diff --git a/lib/asio/detail/impl/strand_service.hpp b/lib/asio/detail/impl/strand_service.hpp new file mode 100644 index 0000000..da5b716 --- /dev/null +++ b/lib/asio/detail/impl/strand_service.hpp @@ -0,0 +1,118 @@ +// +// detail/impl/strand_service.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP +#define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/call_stack.hpp" +#include "asio/detail/completion_handler.hpp" +#include "asio/detail/fenced_block.hpp" +#include "asio/detail/handler_alloc_helpers.hpp" +#include "asio/detail/handler_invoke_helpers.hpp" +#include "asio/detail/memory.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +inline strand_service::strand_impl::strand_impl() + : operation(&strand_service::do_complete), + locked_(false) +{ +} + +struct strand_service::on_dispatch_exit +{ + io_context_impl* io_context_; + strand_impl* impl_; + + ~on_dispatch_exit() + { + impl_->mutex_.lock(); + impl_->ready_queue_.push(impl_->waiting_queue_); + bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); + impl_->mutex_.unlock(); + + if (more_handlers) + io_context_->post_immediate_completion(impl_, false); + } +}; + +template <typename Handler> +void strand_service::dispatch(strand_service::implementation_type& impl, + Handler& handler) +{ + // If we are already in the strand then the handler can run immediately. + if (call_stack<strand_impl>::contains(impl)) + { + fenced_block b(fenced_block::full); + asio_handler_invoke_helpers::invoke(handler, handler); + return; + } + + // Allocate and construct an operation to wrap the handler. + typedef completion_handler<Handler> op; + typename op::ptr p = { asio::detail::addressof(handler), + op::ptr::allocate(handler), 0 }; + p.p = new (p.v) op(handler); + + ASIO_HANDLER_CREATION((this->context(), + *p.p, "strand", impl, 0, "dispatch")); + + bool dispatch_immediately = do_dispatch(impl, p.p); + operation* o = p.p; + p.v = p.p = 0; + + if (dispatch_immediately) + { + // Indicate that this strand is executing on the current thread. + call_stack<strand_impl>::context ctx(impl); + + // Ensure the next handler, if any, is scheduled on block exit. + on_dispatch_exit on_exit = { &io_context_, impl }; + (void)on_exit; + + completion_handler<Handler>::do_complete( + &io_context_, o, asio::error_code(), 0); + } +} + +// Request the io_context to invoke the given handler and return immediately. +template <typename Handler> +void strand_service::post(strand_service::implementation_type& impl, + Handler& handler) +{ + bool is_continuation = + asio_handler_cont_helpers::is_continuation(handler); + + // Allocate and construct an operation to wrap the handler. 
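+  // The ptr triple below allocates storage through the handler's allocation
+  // hooks, placement-constructs the wrapping operation, and is zeroed
+  // (p.v = p.p = 0) once ownership has passed to the strand.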
+ typedef completion_handler<Handler> op; + typename op::ptr p = { asio::detail::addressof(handler), + op::ptr::allocate(handler), 0 }; + p.p = new (p.v) op(handler); + + ASIO_HANDLER_CREATION((this->context(), + *p.p, "strand", impl, 0, "post")); + + do_post(impl, p.p, is_continuation); + p.v = p.p = 0; +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP diff --git a/lib/asio/detail/impl/strand_service.ipp b/lib/asio/detail/impl/strand_service.ipp new file mode 100644 index 0000000..cbaf25b --- /dev/null +++ b/lib/asio/detail/impl/strand_service.ipp @@ -0,0 +1,177 @@ +// +// detail/impl/strand_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP +#define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" +#include "asio/detail/call_stack.hpp" +#include "asio/detail/strand_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +struct strand_service::on_do_complete_exit +{ + io_context_impl* owner_; + strand_impl* impl_; + + ~on_do_complete_exit() + { + impl_->mutex_.lock(); + impl_->ready_queue_.push(impl_->waiting_queue_); + bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); + impl_->mutex_.unlock(); + + if (more_handlers) + owner_->post_immediate_completion(impl_, true); + } +}; + +strand_service::strand_service(asio::io_context& io_context) + : asio::detail::service_base<strand_service>(io_context), + io_context_(asio::use_service<io_context_impl>(io_context)), + mutex_(), + salt_(0) +{ +} + +void strand_service::shutdown() +{ + op_queue<operation> ops; + + asio::detail::mutex::scoped_lock lock(mutex_); + + for (std::size_t i = 0; i < num_implementations; ++i) + { + if (strand_impl* impl = implementations_[i].get()) + { + ops.push(impl->waiting_queue_); + ops.push(impl->ready_queue_); + } + } +} + +void strand_service::construct(strand_service::implementation_type& impl) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + std::size_t salt = salt_++; +#if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) + std::size_t index = salt; +#else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) + std::size_t index = reinterpret_cast<std::size_t>(&impl); + index += (reinterpret_cast<std::size_t>(&impl) >> 3); + index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2); +#endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) + index = index % num_implementations; + + if (!implementations_[index].get()) + implementations_[index].reset(new strand_impl); + impl = implementations_[index].get(); +} + +bool strand_service::running_in_this_thread( + const implementation_type& impl) const +{ + return call_stack<strand_impl>::contains(impl) != 0; +} + +bool strand_service::do_dispatch(implementation_type& impl, operation* op) +{ + // If we are running inside the io_context, and no other handler already + // holds the strand lock, then the handler can run immediately. + bool can_dispatch = io_context_.can_dispatch(); + impl->mutex_.lock(); + if (can_dispatch && !impl->locked_) + { + // Immediate invocation is allowed. 
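+    // The caller (dispatch()) runs the handler inline on this thread and
+    // then, via on_dispatch_exit, hands the strand over to any handlers
+    // that arrived in the meantime, or releases the lock if none did.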
+ impl->locked_ = true; + impl->mutex_.unlock(); + return true; + } + + if (impl->locked_) + { + // Some other handler already holds the strand lock. Enqueue for later. + impl->waiting_queue_.push(op); + impl->mutex_.unlock(); + } + else + { + // The handler is acquiring the strand lock and so is responsible for + // scheduling the strand. + impl->locked_ = true; + impl->mutex_.unlock(); + impl->ready_queue_.push(op); + io_context_.post_immediate_completion(impl, false); + } + + return false; +} + +void strand_service::do_post(implementation_type& impl, + operation* op, bool is_continuation) +{ + impl->mutex_.lock(); + if (impl->locked_) + { + // Some other handler already holds the strand lock. Enqueue for later. + impl->waiting_queue_.push(op); + impl->mutex_.unlock(); + } + else + { + // The handler is acquiring the strand lock and so is responsible for + // scheduling the strand. + impl->locked_ = true; + impl->mutex_.unlock(); + impl->ready_queue_.push(op); + io_context_.post_immediate_completion(impl, is_continuation); + } +} + +void strand_service::do_complete(void* owner, operation* base, + const asio::error_code& ec, std::size_t /*bytes_transferred*/) +{ + if (owner) + { + strand_impl* impl = static_cast<strand_impl*>(base); + + // Indicate that this strand is executing on the current thread. + call_stack<strand_impl>::context ctx(impl); + + // Ensure the next handler, if any, is scheduled on block exit. + on_do_complete_exit on_exit; + on_exit.owner_ = static_cast<io_context_impl*>(owner); + on_exit.impl_ = impl; + + // Run all ready handlers. No lock is required since the ready queue is + // accessed only within the strand. + while (operation* o = impl->ready_queue_.front()) + { + impl->ready_queue_.pop(); + o->complete(owner, ec, 0); + } + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP diff --git a/lib/asio/detail/impl/throw_error.ipp b/lib/asio/detail/impl/throw_error.ipp new file mode 100644 index 0000000..a540cd2 --- /dev/null +++ b/lib/asio/detail/impl/throw_error.ipp @@ -0,0 +1,60 @@ +// +// detail/impl/throw_error.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP +#define ASIO_DETAIL_IMPL_THROW_ERROR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/detail/throw_exception.hpp" +#include "asio/system_error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +void do_throw_error(const asio::error_code& err) +{ + asio::system_error e(err); + asio::detail::throw_exception(e); +} + +void do_throw_error(const asio::error_code& err, const char* location) +{ + // boostify: non-boost code starts here +#if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) + // Microsoft's implementation of std::system_error is non-conformant in that + // it ignores the error code's message when a "what" string is supplied. We'll + // work around this by explicitly formatting the "what" string. 
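+  // The formatted string approximates what a conforming implementation
+  // reports, e.g. (illustrative values only):
+  //
+  //   what() == "iocp: Access is denied"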
+ std::string what_msg = location; + what_msg += ": "; + what_msg += err.message(); + asio::system_error e(err, what_msg); + asio::detail::throw_exception(e); +#else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) + // boostify: non-boost code ends here + asio::system_error e(err, location); + asio::detail::throw_exception(e); + // boostify: non-boost code starts here +#endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) + // boostify: non-boost code ends here +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP diff --git a/lib/asio/detail/impl/timer_queue_ptime.ipp b/lib/asio/detail/impl/timer_queue_ptime.ipp new file mode 100644 index 0000000..742837f --- /dev/null +++ b/lib/asio/detail/impl/timer_queue_ptime.ipp @@ -0,0 +1,91 @@ +// +// detail/impl/timer_queue_ptime.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP +#define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_BOOST_DATE_TIME) + +#include "asio/detail/timer_queue_ptime.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +timer_queue<time_traits<boost::posix_time::ptime> >::timer_queue() +{ +} + +timer_queue<time_traits<boost::posix_time::ptime> >::~timer_queue() +{ +} + +bool timer_queue<time_traits<boost::posix_time::ptime> >::enqueue_timer( + const time_type& time, per_timer_data& timer, wait_op* op) +{ + return impl_.enqueue_timer(time, timer, op); +} + +bool timer_queue<time_traits<boost::posix_time::ptime> >::empty() const +{ + return impl_.empty(); +} + +long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_msec( + long max_duration) const +{ + return impl_.wait_duration_msec(max_duration); +} + +long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_usec( + long max_duration) const +{ + return impl_.wait_duration_usec(max_duration); +} + +void timer_queue<time_traits<boost::posix_time::ptime> >::get_ready_timers( + op_queue<operation>& ops) +{ + impl_.get_ready_timers(ops); +} + +void timer_queue<time_traits<boost::posix_time::ptime> >::get_all_timers( + op_queue<operation>& ops) +{ + impl_.get_all_timers(ops); +} + +std::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer( + per_timer_data& timer, op_queue<operation>& ops, std::size_t max_cancelled) +{ + return impl_.cancel_timer(timer, ops, max_cancelled); +} + +void timer_queue<time_traits<boost::posix_time::ptime> >::move_timer( + per_timer_data& target, per_timer_data& source) +{ + impl_.move_timer(target, source); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_BOOST_DATE_TIME) + +#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP diff --git a/lib/asio/detail/impl/timer_queue_set.ipp b/lib/asio/detail/impl/timer_queue_set.ipp new file mode 100644 index 0000000..b516548 --- /dev/null +++ b/lib/asio/detail/impl/timer_queue_set.ipp @@ -0,0 +1,101 @@ +// +// detail/impl/timer_queue_set.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 
2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
+#define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+#include "asio/detail/timer_queue_set.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+timer_queue_set::timer_queue_set()
+  : first_(0)
+{
+}
+
+void timer_queue_set::insert(timer_queue_base* q)
+{
+  q->next_ = first_;
+  first_ = q;
+}
+
+void timer_queue_set::erase(timer_queue_base* q)
+{
+  if (first_)
+  {
+    if (q == first_)
+    {
+      first_ = q->next_;
+      q->next_ = 0;
+      return;
+    }
+
+    for (timer_queue_base* p = first_; p->next_; p = p->next_)
+    {
+      if (p->next_ == q)
+      {
+        p->next_ = q->next_;
+        q->next_ = 0;
+        return;
+      }
+    }
+  }
+}
+
+bool timer_queue_set::all_empty() const
+{
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    if (!p->empty())
+      return false;
+  return true;
+}
+
+long timer_queue_set::wait_duration_msec(long max_duration) const
+{
+  long min_duration = max_duration;
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    min_duration = p->wait_duration_msec(min_duration);
+  return min_duration;
+}
+
+long timer_queue_set::wait_duration_usec(long max_duration) const
+{
+  long min_duration = max_duration;
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    min_duration = p->wait_duration_usec(min_duration);
+  return min_duration;
+}
+
+void timer_queue_set::get_ready_timers(op_queue<operation>& ops)
+{
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    p->get_ready_timers(ops);
+}
+
+void timer_queue_set::get_all_timers(op_queue<operation>& ops)
+{
+  for (timer_queue_base* p = first_; p; p = p->next_)
+    p->get_all_timers(ops);
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
diff --git a/lib/asio/detail/impl/win_event.ipp b/lib/asio/detail/impl/win_event.ipp
new file mode 100644
index 0000000..6f74649
--- /dev/null
+++ b/lib/asio/detail/impl/win_event.ipp
@@ -0,0 +1,76 @@
+//
+// detail/impl/win_event.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0.
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP +#define ASIO_DETAIL_IMPL_WIN_EVENT_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS) + +#include "asio/detail/throw_error.hpp" +#include "asio/detail/win_event.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +win_event::win_event() + : state_(0) +{ +#if defined(ASIO_WINDOWS_APP) + events_[0] = ::CreateEventExW(0, 0, + CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS); +#else // defined(ASIO_WINDOWS_APP) + events_[0] = ::CreateEventW(0, true, false, 0); +#endif // defined(ASIO_WINDOWS_APP) + if (!events_[0]) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "event"); + } + +#if defined(ASIO_WINDOWS_APP) + events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS); +#else // defined(ASIO_WINDOWS_APP) + events_[1] = ::CreateEventW(0, false, false, 0); +#endif // defined(ASIO_WINDOWS_APP) + if (!events_[1]) + { + DWORD last_error = ::GetLastError(); + ::CloseHandle(events_[0]); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "event"); + } +} + +win_event::~win_event() +{ + ::CloseHandle(events_[0]); + ::CloseHandle(events_[1]); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS) + +#endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP diff --git a/lib/asio/detail/impl/win_iocp_handle_service.ipp b/lib/asio/detail/impl/win_iocp_handle_service.ipp new file mode 100644 index 0000000..9cba2b0 --- /dev/null +++ b/lib/asio/detail/impl/win_iocp_handle_service.ipp @@ -0,0 +1,525 @@ +// +// detail/impl/win_iocp_handle_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP +#define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) + +#include "asio/detail/win_iocp_handle_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +class win_iocp_handle_service::overlapped_wrapper + : public OVERLAPPED +{ +public: + explicit overlapped_wrapper(asio::error_code& ec) + { + Internal = 0; + InternalHigh = 0; + Offset = 0; + OffsetHigh = 0; + + // Create a non-signalled manual-reset event, for GetOverlappedResult. + hEvent = ::CreateEventW(0, TRUE, FALSE, 0); + if (hEvent) + { + // As documented in GetQueuedCompletionStatus, setting the low order + // bit of this event prevents our synchronous writes from being treated + // as completion port events. 
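+      // Kernel handles are multiples of four, so the low-order bit is
+      // free to carry this tag; note that the tagged value is what the
+      // destructor later passes to CloseHandle.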
+ DWORD_PTR tmp = reinterpret_cast<DWORD_PTR>(hEvent); + hEvent = reinterpret_cast<HANDLE>(tmp | 1); + } + else + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + } + + ~overlapped_wrapper() + { + if (hEvent) + { + ::CloseHandle(hEvent); + } + } +}; + +win_iocp_handle_service::win_iocp_handle_service( + asio::io_context& io_context) + : service_base<win_iocp_handle_service>(io_context), + iocp_service_(asio::use_service<win_iocp_io_context>(io_context)), + mutex_(), + impl_list_(0) +{ +} + +void win_iocp_handle_service::shutdown() +{ + // Close all implementations, causing all operations to complete. + asio::detail::mutex::scoped_lock lock(mutex_); + implementation_type* impl = impl_list_; + while (impl) + { + close_for_destruction(*impl); + impl = impl->next_; + } +} + +void win_iocp_handle_service::construct( + win_iocp_handle_service::implementation_type& impl) +{ + impl.handle_ = INVALID_HANDLE_VALUE; + impl.safe_cancellation_thread_id_ = 0; + + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; +} + +void win_iocp_handle_service::move_construct( + win_iocp_handle_service::implementation_type& impl, + win_iocp_handle_service::implementation_type& other_impl) +{ + impl.handle_ = other_impl.handle_; + other_impl.handle_ = INVALID_HANDLE_VALUE; + + impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; + other_impl.safe_cancellation_thread_id_ = 0; + + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; +} + +void win_iocp_handle_service::move_assign( + win_iocp_handle_service::implementation_type& impl, + win_iocp_handle_service& other_service, + win_iocp_handle_service::implementation_type& other_impl) +{ + close_for_destruction(impl); + + if (this != &other_service) + { + // Remove implementation from linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; + } + + impl.handle_ = other_impl.handle_; + other_impl.handle_ = INVALID_HANDLE_VALUE; + + impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; + other_impl.safe_cancellation_thread_id_ = 0; + + if (this != &other_service) + { + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(other_service.mutex_); + impl.next_ = other_service.impl_list_; + impl.prev_ = 0; + if (other_service.impl_list_) + other_service.impl_list_->prev_ = &impl; + other_service.impl_list_ = &impl; + } +} + +void win_iocp_handle_service::destroy( + win_iocp_handle_service::implementation_type& impl) +{ + close_for_destruction(impl); + + // Remove implementation from linked list of all implementations. 
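+  // Implementations form an intrusive doubly-linked list through their
+  // next_/prev_ members, so unlinking here is O(1) and allocation-free;
+  // mutex_ protects only this list, not the handles themselves.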
+ asio::detail::mutex::scoped_lock lock(mutex_); + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; +} + +asio::error_code win_iocp_handle_service::assign( + win_iocp_handle_service::implementation_type& impl, + const native_handle_type& handle, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + if (iocp_service_.register_handle(handle, ec)) + return ec; + + impl.handle_ = handle; + ec = asio::error_code(); + return ec; +} + +asio::error_code win_iocp_handle_service::close( + win_iocp_handle_service::implementation_type& impl, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle", + &impl, reinterpret_cast<uintmax_t>(impl.handle_), "close")); + + if (!::CloseHandle(impl.handle_)) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + else + { + ec = asio::error_code(); + } + + impl.handle_ = INVALID_HANDLE_VALUE; + impl.safe_cancellation_thread_id_ = 0; + } + else + { + ec = asio::error_code(); + } + + return ec; +} + +asio::error_code win_iocp_handle_service::cancel( + win_iocp_handle_service::implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return ec; + } + + ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle", + &impl, reinterpret_cast<uintmax_t>(impl.handle_), "cancel")); + + if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( + ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) + { + // The version of Windows supports cancellation from any thread. + typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); + cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr; + if (!cancel_io_ex(impl.handle_, 0)) + { + DWORD last_error = ::GetLastError(); + if (last_error == ERROR_NOT_FOUND) + { + // ERROR_NOT_FOUND means that there were no operations to be + // cancelled. We swallow this error to match the behaviour on other + // platforms. + ec = asio::error_code(); + } + else + { + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + } + else + { + ec = asio::error_code(); + } + } + else if (impl.safe_cancellation_thread_id_ == 0) + { + // No operations have been started, so there's nothing to cancel. + ec = asio::error_code(); + } + else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) + { + // Asynchronous operations have been started from the current thread only, + // so it is safe to try to cancel them using CancelIo. + if (!::CancelIo(impl.handle_)) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + else + { + ec = asio::error_code(); + } + } + else + { + // Asynchronous operations have been started from more than one thread, + // so cancellation is not safe. + ec = asio::error::operation_not_supported; + } + + return ec; +} + +size_t win_iocp_handle_service::do_write( + win_iocp_handle_service::implementation_type& impl, uint64_t offset, + const asio::const_buffer& buffer, asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // A request to write 0 bytes on a handle is a no-op. 
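+  // (This matches asio's stream concepts, under which a zero-length
+  // write completes immediately with no error.)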
+ if (buffer.size() == 0) + { + ec = asio::error_code(); + return 0; + } + + overlapped_wrapper overlapped(ec); + if (ec) + { + return 0; + } + + // Write the data. + overlapped.Offset = offset & 0xFFFFFFFF; + overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; + BOOL ok = ::WriteFile(impl.handle_, buffer.data(), + static_cast<DWORD>(buffer.size()), 0, &overlapped); + if (!ok) + { + DWORD last_error = ::GetLastError(); + if (last_error != ERROR_IO_PENDING) + { + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return 0; + } + } + + // Wait for the operation to complete. + DWORD bytes_transferred = 0; + ok = ::GetOverlappedResult(impl.handle_, + &overlapped, &bytes_transferred, TRUE); + if (!ok) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return 0; + } + + ec = asio::error_code(); + return bytes_transferred; +} + +void win_iocp_handle_service::start_write_op( + win_iocp_handle_service::implementation_type& impl, uint64_t offset, + const asio::const_buffer& buffer, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (!is_open(impl)) + { + iocp_service_.on_completion(op, asio::error::bad_descriptor); + } + else if (buffer.size() == 0) + { + // A request to write 0 bytes on a handle is a no-op. + iocp_service_.on_completion(op); + } + else + { + DWORD bytes_transferred = 0; + op->Offset = offset & 0xFFFFFFFF; + op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; + BOOL ok = ::WriteFile(impl.handle_, buffer.data(), + static_cast<DWORD>(buffer.size()), + &bytes_transferred, op); + DWORD last_error = ::GetLastError(); + if (!ok && last_error != ERROR_IO_PENDING + && last_error != ERROR_MORE_DATA) + { + iocp_service_.on_completion(op, last_error, bytes_transferred); + } + else + { + iocp_service_.on_pending(op); + } + } +} + +size_t win_iocp_handle_service::do_read( + win_iocp_handle_service::implementation_type& impl, uint64_t offset, + const asio::mutable_buffer& buffer, asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return 0; + } + + // A request to read 0 bytes on a stream handle is a no-op. + if (buffer.size() == 0) + { + ec = asio::error_code(); + return 0; + } + + overlapped_wrapper overlapped(ec); + if (ec) + { + return 0; + } + + // Read some data. + overlapped.Offset = offset & 0xFFFFFFFF; + overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; + BOOL ok = ::ReadFile(impl.handle_, buffer.data(), + static_cast<DWORD>(buffer.size()), 0, &overlapped); + if (!ok) + { + DWORD last_error = ::GetLastError(); + if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) + { + if (last_error == ERROR_HANDLE_EOF) + { + ec = asio::error::eof; + } + else + { + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + return 0; + } + } + + // Wait for the operation to complete. + DWORD bytes_transferred = 0; + ok = ::GetOverlappedResult(impl.handle_, + &overlapped, &bytes_transferred, TRUE); + if (!ok) + { + DWORD last_error = ::GetLastError(); + if (last_error == ERROR_HANDLE_EOF) + { + ec = asio::error::eof; + } + else + { + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + return (last_error == ERROR_MORE_DATA) ? 
bytes_transferred : 0; + } + + ec = asio::error_code(); + return bytes_transferred; +} + +void win_iocp_handle_service::start_read_op( + win_iocp_handle_service::implementation_type& impl, uint64_t offset, + const asio::mutable_buffer& buffer, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (!is_open(impl)) + { + iocp_service_.on_completion(op, asio::error::bad_descriptor); + } + else if (buffer.size() == 0) + { + // A request to read 0 bytes on a handle is a no-op. + iocp_service_.on_completion(op); + } + else + { + DWORD bytes_transferred = 0; + op->Offset = offset & 0xFFFFFFFF; + op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; + BOOL ok = ::ReadFile(impl.handle_, buffer.data(), + static_cast<DWORD>(buffer.size()), + &bytes_transferred, op); + DWORD last_error = ::GetLastError(); + if (!ok && last_error != ERROR_IO_PENDING + && last_error != ERROR_MORE_DATA) + { + iocp_service_.on_completion(op, last_error, bytes_transferred); + } + else + { + iocp_service_.on_pending(op); + } + } +} + +void win_iocp_handle_service::update_cancellation_thread_id( + win_iocp_handle_service::implementation_type& impl) +{ + if (impl.safe_cancellation_thread_id_ == 0) + impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); + else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) + impl.safe_cancellation_thread_id_ = ~DWORD(0); +} + +void win_iocp_handle_service::close_for_destruction(implementation_type& impl) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle", + &impl, reinterpret_cast<uintmax_t>(impl.handle_), "close")); + + ::CloseHandle(impl.handle_); + impl.handle_ = INVALID_HANDLE_VALUE; + impl.safe_cancellation_thread_id_ = 0; + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_IOCP) + +#endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP diff --git a/lib/asio/detail/impl/win_iocp_io_context.hpp b/lib/asio/detail/impl/win_iocp_io_context.hpp new file mode 100644 index 0000000..44887d7 --- /dev/null +++ b/lib/asio/detail/impl/win_iocp_io_context.hpp @@ -0,0 +1,103 @@ +// +// detail/impl/win_iocp_io_context.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP +#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) + +#include "asio/detail/completion_handler.hpp" +#include "asio/detail/fenced_block.hpp" +#include "asio/detail/handler_alloc_helpers.hpp" +#include "asio/detail/handler_invoke_helpers.hpp" +#include "asio/detail/memory.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Time_Traits> +void win_iocp_io_context::add_timer_queue( + timer_queue<Time_Traits>& queue) +{ + do_add_timer_queue(queue); +} + +template <typename Time_Traits> +void win_iocp_io_context::remove_timer_queue( + timer_queue<Time_Traits>& queue) +{ + do_remove_timer_queue(queue); +} + +template <typename Time_Traits> +void win_iocp_io_context::schedule_timer(timer_queue<Time_Traits>& queue, + const typename Time_Traits::time_type& time, + typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op) +{ + // If the service has been shut down we silently discard the timer. + if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) + { + post_immediate_completion(op, false); + return; + } + + mutex::scoped_lock lock(dispatch_mutex_); + + bool earliest = queue.enqueue_timer(time, timer, op); + work_started(); + if (earliest) + update_timeout(); +} + +template <typename Time_Traits> +std::size_t win_iocp_io_context::cancel_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& timer, + std::size_t max_cancelled) +{ + // If the service has been shut down we silently ignore the cancellation. + if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) + return 0; + + mutex::scoped_lock lock(dispatch_mutex_); + op_queue<win_iocp_operation> ops; + std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); + post_deferred_completions(ops); + return n; +} + +template <typename Time_Traits> +void win_iocp_io_context::move_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& to, + typename timer_queue<Time_Traits>::per_timer_data& from) +{ + asio::detail::mutex::scoped_lock lock(dispatch_mutex_); + op_queue<operation> ops; + queue.cancel_timer(to, ops); + queue.move_timer(to, from); + lock.unlock(); + post_deferred_completions(ops); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_IOCP) + +#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP diff --git a/lib/asio/detail/impl/win_iocp_io_context.ipp b/lib/asio/detail/impl/win_iocp_io_context.ipp new file mode 100644 index 0000000..c371b86 --- /dev/null +++ b/lib/asio/detail/impl/win_iocp_io_context.ipp @@ -0,0 +1,554 @@ +// +// detail/impl/win_iocp_io_context.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP +#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) + +#include "asio/error.hpp" +#include "asio/detail/cstdint.hpp" +#include "asio/detail/handler_alloc_helpers.hpp" +#include "asio/detail/handler_invoke_helpers.hpp" +#include "asio/detail/limits.hpp" +#include "asio/detail/throw_error.hpp" +#include "asio/detail/win_iocp_io_context.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +struct win_iocp_io_context::work_finished_on_block_exit +{ + ~work_finished_on_block_exit() + { + io_context_->work_finished(); + } + + win_iocp_io_context* io_context_; +}; + +struct win_iocp_io_context::timer_thread_function +{ + void operator()() + { + while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0) + { + if (::WaitForSingleObject(io_context_->waitable_timer_.handle, + INFINITE) == WAIT_OBJECT_0) + { + ::InterlockedExchange(&io_context_->dispatch_required_, 1); + ::PostQueuedCompletionStatus(io_context_->iocp_.handle, + 0, wake_for_dispatch, 0); + } + } + } + + win_iocp_io_context* io_context_; +}; + +win_iocp_io_context::win_iocp_io_context( + asio::execution_context& ctx, int concurrency_hint) + : execution_context_service_base<win_iocp_io_context>(ctx), + iocp_(), + outstanding_work_(0), + stopped_(0), + stop_event_posted_(0), + shutdown_(0), + gqcs_timeout_(get_gqcs_timeout()), + dispatch_required_(0), + concurrency_hint_(concurrency_hint) +{ + ASIO_HANDLER_TRACKING_INIT; + + iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, + static_cast<DWORD>(concurrency_hint >= 0 ? 
concurrency_hint : DWORD(~0))); + if (!iocp_.handle) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "iocp"); + } +} + +void win_iocp_io_context::shutdown() +{ + ::InterlockedExchange(&shutdown_, 1); + + if (timer_thread_.get()) + { + LARGE_INTEGER timeout; + timeout.QuadPart = 1; + ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE); + } + + while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0) + { + op_queue<win_iocp_operation> ops; + timer_queues_.get_all_timers(ops); + ops.push(completed_ops_); + if (!ops.empty()) + { + while (win_iocp_operation* op = ops.front()) + { + ops.pop(); + ::InterlockedDecrement(&outstanding_work_); + op->destroy(); + } + } + else + { + DWORD bytes_transferred = 0; + dword_ptr_t completion_key = 0; + LPOVERLAPPED overlapped = 0; + ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, + &completion_key, &overlapped, gqcs_timeout_); + if (overlapped) + { + ::InterlockedDecrement(&outstanding_work_); + static_cast<win_iocp_operation*>(overlapped)->destroy(); + } + } + } + + if (timer_thread_.get()) + timer_thread_->join(); +} + +asio::error_code win_iocp_io_context::register_handle( + HANDLE handle, asio::error_code& ec) +{ + if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + else + { + ec = asio::error_code(); + } + return ec; +} + +size_t win_iocp_io_context::run(asio::error_code& ec) +{ + if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) + { + stop(); + ec = asio::error_code(); + return 0; + } + + win_iocp_thread_info this_thread; + thread_call_stack::context ctx(this, this_thread); + + size_t n = 0; + while (do_one(INFINITE, ec)) + if (n != (std::numeric_limits<size_t>::max)()) + ++n; + return n; +} + +size_t win_iocp_io_context::run_one(asio::error_code& ec) +{ + if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) + { + stop(); + ec = asio::error_code(); + return 0; + } + + win_iocp_thread_info this_thread; + thread_call_stack::context ctx(this, this_thread); + + return do_one(INFINITE, ec); +} + +size_t win_iocp_io_context::wait_one(long usec, asio::error_code& ec) +{ + if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) + { + stop(); + ec = asio::error_code(); + return 0; + } + + win_iocp_thread_info this_thread; + thread_call_stack::context ctx(this, this_thread); + + return do_one(usec < 0 ? 
INFINITE : ((usec - 1) / 1000 + 1), ec); +} + +size_t win_iocp_io_context::poll(asio::error_code& ec) +{ + if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) + { + stop(); + ec = asio::error_code(); + return 0; + } + + win_iocp_thread_info this_thread; + thread_call_stack::context ctx(this, this_thread); + + size_t n = 0; + while (do_one(0, ec)) + if (n != (std::numeric_limits<size_t>::max)()) + ++n; + return n; +} + +size_t win_iocp_io_context::poll_one(asio::error_code& ec) +{ + if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) + { + stop(); + ec = asio::error_code(); + return 0; + } + + win_iocp_thread_info this_thread; + thread_call_stack::context ctx(this, this_thread); + + return do_one(0, ec); +} + +void win_iocp_io_context::stop() +{ + if (::InterlockedExchange(&stopped_, 1) == 0) + { + if (::InterlockedExchange(&stop_event_posted_, 1) == 0) + { + if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "pqcs"); + } + } + } +} + +void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op) +{ + // Flag the operation as ready. + op->ready_ = 1; + + // Enqueue the operation on the I/O completion port. + if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) + { + // Out of resources. Put on completed queue instead. + mutex::scoped_lock lock(dispatch_mutex_); + completed_ops_.push(op); + ::InterlockedExchange(&dispatch_required_, 1); + } +} + +void win_iocp_io_context::post_deferred_completions( + op_queue<win_iocp_operation>& ops) +{ + while (win_iocp_operation* op = ops.front()) + { + ops.pop(); + + // Flag the operation as ready. + op->ready_ = 1; + + // Enqueue the operation on the I/O completion port. + if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) + { + // Out of resources. Put on completed queue instead. + mutex::scoped_lock lock(dispatch_mutex_); + completed_ops_.push(op); + completed_ops_.push(ops); + ::InterlockedExchange(&dispatch_required_, 1); + } + } +} + +void win_iocp_io_context::abandon_operations( + op_queue<win_iocp_operation>& ops) +{ + while (win_iocp_operation* op = ops.front()) + { + ops.pop(); + ::InterlockedDecrement(&outstanding_work_); + op->destroy(); + } +} + +void win_iocp_io_context::on_pending(win_iocp_operation* op) +{ + if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) + { + // Enqueue the operation on the I/O completion port. + if (!::PostQueuedCompletionStatus(iocp_.handle, + 0, overlapped_contains_result, op)) + { + // Out of resources. Put on completed queue instead. + mutex::scoped_lock lock(dispatch_mutex_); + completed_ops_.push(op); + ::InterlockedExchange(&dispatch_required_, 1); + } + } +} + +void win_iocp_io_context::on_completion(win_iocp_operation* op, + DWORD last_error, DWORD bytes_transferred) +{ + // Flag that the operation is ready for invocation. + op->ready_ = 1; + + // Store results in the OVERLAPPED structure. + op->Internal = reinterpret_cast<ulong_ptr_t>( + &asio::error::get_system_category()); + op->Offset = last_error; + op->OffsetHigh = bytes_transferred; + + // Enqueue the operation on the I/O completion port. + if (!::PostQueuedCompletionStatus(iocp_.handle, + 0, overlapped_contains_result, op)) + { + // Out of resources. Put on completed queue instead. 
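+    // PostQueuedCompletionStatus can fail under resource pressure. The
+    // fallback stores the operation on completed_ops_ and raises
+    // dispatch_required_, which do_one() tests on every iteration, so the
+    // completion is still delivered, just via the mutex-protected queue.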
+ mutex::scoped_lock lock(dispatch_mutex_); + completed_ops_.push(op); + ::InterlockedExchange(&dispatch_required_, 1); + } +} + +void win_iocp_io_context::on_completion(win_iocp_operation* op, + const asio::error_code& ec, DWORD bytes_transferred) +{ + // Flag that the operation is ready for invocation. + op->ready_ = 1; + + // Store results in the OVERLAPPED structure. + op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category()); + op->Offset = ec.value(); + op->OffsetHigh = bytes_transferred; + + // Enqueue the operation on the I/O completion port. + if (!::PostQueuedCompletionStatus(iocp_.handle, + 0, overlapped_contains_result, op)) + { + // Out of resources. Put on completed queue instead. + mutex::scoped_lock lock(dispatch_mutex_); + completed_ops_.push(op); + ::InterlockedExchange(&dispatch_required_, 1); + } +} + +size_t win_iocp_io_context::do_one(DWORD msec, asio::error_code& ec) +{ + for (;;) + { + // Try to acquire responsibility for dispatching timers and completed ops. + if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1) + { + mutex::scoped_lock lock(dispatch_mutex_); + + // Dispatch pending timers and operations. + op_queue<win_iocp_operation> ops; + ops.push(completed_ops_); + timer_queues_.get_ready_timers(ops); + post_deferred_completions(ops); + update_timeout(); + } + + // Get the next operation from the queue. + DWORD bytes_transferred = 0; + dword_ptr_t completion_key = 0; + LPOVERLAPPED overlapped = 0; + ::SetLastError(0); + BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, + &bytes_transferred, &completion_key, &overlapped, + msec < gqcs_timeout_ ? msec : gqcs_timeout_); + DWORD last_error = ::GetLastError(); + + if (overlapped) + { + win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped); + asio::error_code result_ec(last_error, + asio::error::get_system_category()); + + // We may have been passed the last_error and bytes_transferred in the + // OVERLAPPED structure itself. + if (completion_key == overlapped_contains_result) + { + result_ec = asio::error_code(static_cast<int>(op->Offset), + *reinterpret_cast<asio::error_category*>(op->Internal)); + bytes_transferred = op->OffsetHigh; + } + + // Otherwise ensure any result has been saved into the OVERLAPPED + // structure. + else + { + op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category()); + op->Offset = result_ec.value(); + op->OffsetHigh = bytes_transferred; + } + + // Dispatch the operation only if ready. The operation may not be ready + // if the initiating function (e.g. a call to WSARecv) has not yet + // returned. This is because the initiating function still wants access + // to the operation's OVERLAPPED structure. + if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) + { + // Ensure the count of outstanding work is decremented on block exit. + work_finished_on_block_exit on_exit = { this }; + (void)on_exit; + + op->complete(this, result_ec, bytes_transferred); + ec = asio::error_code(); + return 1; + } + } + else if (!ok) + { + if (last_error != WAIT_TIMEOUT) + { + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return 0; + } + + // If we're waiting indefinitely we need to keep going until we get a + // real handler. + if (msec == INFINITE) + continue; + + ec = asio::error_code(); + return 0; + } + else if (completion_key == wake_for_dispatch) + { + // We have been woken up to try to acquire responsibility for dispatching + // timers and completed operations. 
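+      // Nothing to do here: the interlocked compare-exchange at the top
+      // of the next loop iteration claims dispatch_required_ and flushes
+      // completed_ops_ and the ready timers.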
+ } + else + { + // Indicate that there is no longer an in-flight stop event. + ::InterlockedExchange(&stop_event_posted_, 0); + + // The stopped_ flag is always checked to ensure that any leftover + // stop events from a previous run invocation are ignored. + if (::InterlockedExchangeAdd(&stopped_, 0) != 0) + { + // Wake up next thread that is blocked on GetQueuedCompletionStatus. + if (::InterlockedExchange(&stop_event_posted_, 1) == 0) + { + if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) + { + last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return 0; + } + } + + ec = asio::error_code(); + return 0; + } + } + } +} + +DWORD win_iocp_io_context::get_gqcs_timeout() +{ + OSVERSIONINFOEX osvi; + ZeroMemory(&osvi, sizeof(osvi)); + osvi.dwOSVersionInfoSize = sizeof(osvi); + osvi.dwMajorVersion = 6ul; + + const uint64_t condition_mask = ::VerSetConditionMask( + 0, VER_MAJORVERSION, VER_GREATER_EQUAL); + + if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask)) + return INFINITE; + + return default_gqcs_timeout; +} + +void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(dispatch_mutex_); + + timer_queues_.insert(&queue); + + if (!waitable_timer_.handle) + { + waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0); + if (waitable_timer_.handle == 0) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "timer"); + } + + LARGE_INTEGER timeout; + timeout.QuadPart = -max_timeout_usec; + timeout.QuadPart *= 10; + ::SetWaitableTimer(waitable_timer_.handle, + &timeout, max_timeout_msec, 0, 0, FALSE); + } + + if (!timer_thread_.get()) + { + timer_thread_function thread_function = { this }; + timer_thread_.reset(new thread(thread_function, 65536)); + } +} + +void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(dispatch_mutex_); + + timer_queues_.erase(&queue); +} + +void win_iocp_io_context::update_timeout() +{ + if (timer_thread_.get()) + { + // There's no point updating the waitable timer if the new timeout period + // exceeds the maximum timeout. In that case, we might as well wait for the + // existing period of the timer to expire. + long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec); + if (timeout_usec < max_timeout_usec) + { + LARGE_INTEGER timeout; + timeout.QuadPart = -timeout_usec; + timeout.QuadPart *= 10; + ::SetWaitableTimer(waitable_timer_.handle, + &timeout, max_timeout_msec, 0, 0, FALSE); + } + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_IOCP) + +#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP diff --git a/lib/asio/detail/impl/win_iocp_serial_port_service.ipp b/lib/asio/detail/impl/win_iocp_serial_port_service.ipp new file mode 100644 index 0000000..4a9b8cd --- /dev/null +++ b/lib/asio/detail/impl/win_iocp_serial_port_service.ipp @@ -0,0 +1,181 @@ +// +// detail/impl/win_iocp_serial_port_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP +#define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) + +#include <cstring> +#include "asio/detail/win_iocp_serial_port_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +win_iocp_serial_port_service::win_iocp_serial_port_service( + asio::io_context& io_context) + : service_base<win_iocp_serial_port_service>(io_context), + handle_service_(io_context) +{ +} + +void win_iocp_serial_port_service::shutdown() +{ +} + +asio::error_code win_iocp_serial_port_service::open( + win_iocp_serial_port_service::implementation_type& impl, + const std::string& device, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + // For convenience, add a leading \\.\ sequence if not already present. + std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device; + + // Open a handle to the serial port. + ::HANDLE handle = ::CreateFileA(name.c_str(), + GENERIC_READ | GENERIC_WRITE, 0, 0, + OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0); + if (handle == INVALID_HANDLE_VALUE) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + // Determine the initial serial port parameters. + using namespace std; // For memset. + ::DCB dcb; + memset(&dcb, 0, sizeof(DCB)); + dcb.DCBlength = sizeof(DCB); + if (!::GetCommState(handle, &dcb)) + { + DWORD last_error = ::GetLastError(); + ::CloseHandle(handle); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + // Set some default serial port parameters. This implementation does not + // support changing these, so they might as well be in a known state. + dcb.fBinary = TRUE; // Win32 only supports binary mode. + dcb.fDsrSensitivity = FALSE; + dcb.fNull = FALSE; // Do not ignore NULL characters. + dcb.fAbortOnError = FALSE; // Ignore serial framing errors. + if (!::SetCommState(handle, &dcb)) + { + DWORD last_error = ::GetLastError(); + ::CloseHandle(handle); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + // Set up timeouts so that the serial port will behave similarly to a + // network socket. Reads wait for at least one byte, then return with + // whatever they have. Writes return once everything is out the door. + ::COMMTIMEOUTS timeouts; + timeouts.ReadIntervalTimeout = 1; + timeouts.ReadTotalTimeoutMultiplier = 0; + timeouts.ReadTotalTimeoutConstant = 0; + timeouts.WriteTotalTimeoutMultiplier = 0; + timeouts.WriteTotalTimeoutConstant = 0; + if (!::SetCommTimeouts(handle, &timeouts)) + { + DWORD last_error = ::GetLastError(); + ::CloseHandle(handle); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + // We're done. Take ownership of the serial port handle. 
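+  // On success, assign() registers the handle with the I/O completion
+  // port and stores it in impl, after which handle_service_ owns it. On
+  // failure the raw handle is still ours, hence the CloseHandle below.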
+ if (handle_service_.assign(impl, handle, ec)) + ::CloseHandle(handle); + return ec; +} + +asio::error_code win_iocp_serial_port_service::do_set_option( + win_iocp_serial_port_service::implementation_type& impl, + win_iocp_serial_port_service::store_function_type store, + const void* option, asio::error_code& ec) +{ + using namespace std; // For memcpy. + + ::DCB dcb; + memset(&dcb, 0, sizeof(DCB)); + dcb.DCBlength = sizeof(DCB); + if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + if (store(option, dcb, ec)) + return ec; + + if (!::SetCommState(handle_service_.native_handle(impl), &dcb)) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + ec = asio::error_code(); + return ec; +} + +asio::error_code win_iocp_serial_port_service::do_get_option( + const win_iocp_serial_port_service::implementation_type& impl, + win_iocp_serial_port_service::load_function_type load, + void* option, asio::error_code& ec) const +{ + using namespace std; // For memset. + + ::DCB dcb; + memset(&dcb, 0, sizeof(DCB)); + dcb.DCBlength = sizeof(DCB); + if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + return ec; + } + + return load(option, dcb, ec); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) + +#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP diff --git a/lib/asio/detail/impl/win_iocp_socket_service_base.ipp b/lib/asio/detail/impl/win_iocp_socket_service_base.ipp new file mode 100644 index 0000000..6c478cd --- /dev/null +++ b/lib/asio/detail/impl/win_iocp_socket_service_base.ipp @@ -0,0 +1,799 @@ +// +// detail/impl/win_iocp_socket_service_base.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP +#define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_IOCP) + +#include "asio/detail/win_iocp_socket_service_base.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +win_iocp_socket_service_base::win_iocp_socket_service_base( + asio::io_context& io_context) + : io_context_(io_context), + iocp_service_(use_service<win_iocp_io_context>(io_context)), + reactor_(0), + connect_ex_(0), + nt_set_info_(0), + mutex_(), + impl_list_(0) +{ +} + +void win_iocp_socket_service_base::base_shutdown() +{ + // Close all implementations, causing all operations to complete. 
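+  // The lock is held across the whole traversal so that concurrent
+  // construct() or destroy() calls cannot unlink nodes mid-walk.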
+ asio::detail::mutex::scoped_lock lock(mutex_); + base_implementation_type* impl = impl_list_; + while (impl) + { + close_for_destruction(*impl); + impl = impl->next_; + } +} + +void win_iocp_socket_service_base::construct( + win_iocp_socket_service_base::base_implementation_type& impl) +{ + impl.socket_ = invalid_socket; + impl.state_ = 0; + impl.cancel_token_.reset(); +#if defined(ASIO_ENABLE_CANCELIO) + impl.safe_cancellation_thread_id_ = 0; +#endif // defined(ASIO_ENABLE_CANCELIO) + + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; +} + +void win_iocp_socket_service_base::base_move_construct( + win_iocp_socket_service_base::base_implementation_type& impl, + win_iocp_socket_service_base::base_implementation_type& other_impl) +{ + impl.socket_ = other_impl.socket_; + other_impl.socket_ = invalid_socket; + + impl.state_ = other_impl.state_; + other_impl.state_ = 0; + + impl.cancel_token_ = other_impl.cancel_token_; + other_impl.cancel_token_.reset(); + +#if defined(ASIO_ENABLE_CANCELIO) + impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; + other_impl.safe_cancellation_thread_id_ = 0; +#endif // defined(ASIO_ENABLE_CANCELIO) + + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; +} + +void win_iocp_socket_service_base::base_move_assign( + win_iocp_socket_service_base::base_implementation_type& impl, + win_iocp_socket_service_base& other_service, + win_iocp_socket_service_base::base_implementation_type& other_impl) +{ + close_for_destruction(impl); + + if (this != &other_service) + { + // Remove implementation from linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; + } + + impl.socket_ = other_impl.socket_; + other_impl.socket_ = invalid_socket; + + impl.state_ = other_impl.state_; + other_impl.state_ = 0; + + impl.cancel_token_ = other_impl.cancel_token_; + other_impl.cancel_token_.reset(); + +#if defined(ASIO_ENABLE_CANCELIO) + impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; + other_impl.safe_cancellation_thread_id_ = 0; +#endif // defined(ASIO_ENABLE_CANCELIO) + + if (this != &other_service) + { + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(other_service.mutex_); + impl.next_ = other_service.impl_list_; + impl.prev_ = 0; + if (other_service.impl_list_) + other_service.impl_list_->prev_ = &impl; + other_service.impl_list_ = &impl; + } +} + +void win_iocp_socket_service_base::destroy( + win_iocp_socket_service_base::base_implementation_type& impl) +{ + close_for_destruction(impl); + + // Remove implementation from linked list of all implementations. 
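+  // close_for_destruction() above has already closed the socket, so all
+  // that remains is to unlink this implementation from the service's
+  // intrusive list under the mutex.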
+ asio::detail::mutex::scoped_lock lock(mutex_); + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; +} + +asio::error_code win_iocp_socket_service_base::close( + win_iocp_socket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((iocp_service_.context(), + "socket", &impl, impl.socket_, "close")); + + // Check if the reactor was created, in which case we need to close the + // socket on the reactor as well to cancel any operations that might be + // running there. + select_reactor* r = static_cast<select_reactor*>( + interlocked_compare_exchange_pointer( + reinterpret_cast<void**>(&reactor_), 0, 0)); + if (r) + r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); + + socket_ops::close(impl.socket_, impl.state_, false, ec); + + if (r) + r->cleanup_descriptor_data(impl.reactor_data_); + } + else + { + ec = asio::error_code(); + } + + impl.socket_ = invalid_socket; + impl.state_ = 0; + impl.cancel_token_.reset(); +#if defined(ASIO_ENABLE_CANCELIO) + impl.safe_cancellation_thread_id_ = 0; +#endif // defined(ASIO_ENABLE_CANCELIO) + + return ec; +} + +socket_type win_iocp_socket_service_base::release( + win_iocp_socket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + return invalid_socket; + + cancel(impl, ec); + if (ec) + return invalid_socket; + + nt_set_info_fn fn = get_nt_set_info(); + if (fn == 0) + { + ec = asio::error::operation_not_supported; + return invalid_socket; + } + + HANDLE sock_as_handle = reinterpret_cast<HANDLE>(impl.socket_); + ULONG_PTR iosb[2] = { 0, 0 }; + void* info[2] = { 0, 0 }; + if (fn(sock_as_handle, iosb, &info, sizeof(info), + 61 /* FileReplaceCompletionInformation */)) + { + ec = asio::error::operation_not_supported; + return invalid_socket; + } + + socket_type tmp = impl.socket_; + impl.socket_ = invalid_socket; + return tmp; +} + +asio::error_code win_iocp_socket_service_base::cancel( + win_iocp_socket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return ec; + } + + ASIO_HANDLER_OPERATION((iocp_service_.context(), + "socket", &impl, impl.socket_, "cancel")); + + if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( + ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) + { + // The version of Windows supports cancellation from any thread. + typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); + cancel_io_ex_t cancel_io_ex = (cancel_io_ex_t)cancel_io_ex_ptr; + socket_type sock = impl.socket_; + HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock); + if (!cancel_io_ex(sock_as_handle, 0)) + { + DWORD last_error = ::GetLastError(); + if (last_error == ERROR_NOT_FOUND) + { + // ERROR_NOT_FOUND means that there were no operations to be + // cancelled. We swallow this error to match the behaviour on other + // platforms. + ec = asio::error_code(); + } + else + { + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + } + else + { + ec = asio::error_code(); + } + } +#if defined(ASIO_ENABLE_CANCELIO) + else if (impl.safe_cancellation_thread_id_ == 0) + { + // No operations have been started, so there's nothing to cancel. 
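+    // safe_cancellation_thread_id_ is a three-state flag maintained by
+    // update_cancellation_thread_id(): 0 means no operations have been
+    // started, a thread id means every operation was started from that
+    // one thread (so CancelIo is usable), and ~DWORD(0) means operations
+    // came from several threads, making CancelIo unsafe.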
+ ec = asio::error_code(); + } + else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) + { + // Asynchronous operations have been started from the current thread only, + // so it is safe to try to cancel them using CancelIo. + socket_type sock = impl.socket_; + HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock); + if (!::CancelIo(sock_as_handle)) + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + else + { + ec = asio::error_code(); + } + } + else + { + // Asynchronous operations have been started from more than one thread, + // so cancellation is not safe. + ec = asio::error::operation_not_supported; + } +#else // defined(ASIO_ENABLE_CANCELIO) + else + { + // Cancellation is not supported as CancelIo may not be used. + ec = asio::error::operation_not_supported; + } +#endif // defined(ASIO_ENABLE_CANCELIO) + + // Cancel any operations started via the reactor. + if (!ec) + { + select_reactor* r = static_cast<select_reactor*>( + interlocked_compare_exchange_pointer( + reinterpret_cast<void**>(&reactor_), 0, 0)); + if (r) + r->cancel_ops(impl.socket_, impl.reactor_data_); + } + + return ec; +} + +asio::error_code win_iocp_socket_service_base::do_open( + win_iocp_socket_service_base::base_implementation_type& impl, + int family, int type, int protocol, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + socket_holder sock(socket_ops::socket(family, type, protocol, ec)); + if (sock.get() == invalid_socket) + return ec; + + HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock.get()); + if (iocp_service_.register_handle(sock_as_handle, ec)) + return ec; + + impl.socket_ = sock.release(); + switch (type) + { + case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; + case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; + default: impl.state_ = 0; break; + } + impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter()); + ec = asio::error_code(); + return ec; +} + +asio::error_code win_iocp_socket_service_base::do_assign( + win_iocp_socket_service_base::base_implementation_type& impl, + int type, socket_type native_socket, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + HANDLE sock_as_handle = reinterpret_cast<HANDLE>(native_socket); + if (iocp_service_.register_handle(sock_as_handle, ec)) + return ec; + + impl.socket_ = native_socket; + switch (type) + { + case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; + case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; + default: impl.state_ = 0; break; + } + impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter()); + ec = asio::error_code(); + return ec; +} + +void win_iocp_socket_service_base::start_send_op( + win_iocp_socket_service_base::base_implementation_type& impl, + WSABUF* buffers, std::size_t buffer_count, + socket_base::message_flags flags, bool noop, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (noop) + iocp_service_.on_completion(op); + else if (!is_open(impl)) + iocp_service_.on_completion(op, asio::error::bad_descriptor); + else + { + DWORD bytes_transferred = 0; + int result = ::WSASend(impl.socket_, buffers, + static_cast<DWORD>(buffer_count), &bytes_transferred, flags, op, 0); + DWORD last_error = ::WSAGetLastError(); + if (last_error == ERROR_PORT_UNREACHABLE) + last_error = WSAECONNREFUSED; + 
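// A result of 0 (synchronous success) still takes the on_pending() path:
+    // without FILE_SKIP_COMPLETION_PORT_ON_SUCCESS a completion packet is
+    // queued to the port even for synchronous completions, so the handler
+    // must be invoked from the completion path rather than here.
+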
if (result != 0 && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error, bytes_transferred); + else + iocp_service_.on_pending(op); + } +} + +void win_iocp_socket_service_base::start_send_to_op( + win_iocp_socket_service_base::base_implementation_type& impl, + WSABUF* buffers, std::size_t buffer_count, + const socket_addr_type* addr, int addrlen, + socket_base::message_flags flags, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (!is_open(impl)) + iocp_service_.on_completion(op, asio::error::bad_descriptor); + else + { + DWORD bytes_transferred = 0; + int result = ::WSASendTo(impl.socket_, buffers, + static_cast<DWORD>(buffer_count), + &bytes_transferred, flags, addr, addrlen, op, 0); + DWORD last_error = ::WSAGetLastError(); + if (last_error == ERROR_PORT_UNREACHABLE) + last_error = WSAECONNREFUSED; + if (result != 0 && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error, bytes_transferred); + else + iocp_service_.on_pending(op); + } +} + +void win_iocp_socket_service_base::start_receive_op( + win_iocp_socket_service_base::base_implementation_type& impl, + WSABUF* buffers, std::size_t buffer_count, + socket_base::message_flags flags, bool noop, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (noop) + iocp_service_.on_completion(op); + else if (!is_open(impl)) + iocp_service_.on_completion(op, asio::error::bad_descriptor); + else + { + DWORD bytes_transferred = 0; + DWORD recv_flags = flags; + int result = ::WSARecv(impl.socket_, buffers, + static_cast<DWORD>(buffer_count), + &bytes_transferred, &recv_flags, op, 0); + DWORD last_error = ::WSAGetLastError(); + if (last_error == ERROR_NETNAME_DELETED) + last_error = WSAECONNRESET; + else if (last_error == ERROR_PORT_UNREACHABLE) + last_error = WSAECONNREFUSED; + if (result != 0 && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error, bytes_transferred); + else + iocp_service_.on_pending(op); + } +} + +void win_iocp_socket_service_base::start_null_buffers_receive_op( + win_iocp_socket_service_base::base_implementation_type& impl, + socket_base::message_flags flags, reactor_op* op) +{ + if ((impl.state_ & socket_ops::stream_oriented) != 0) + { + // For stream sockets on Windows, we may issue a 0-byte overlapped + // WSARecv to wait until there is data available on the socket. + ::WSABUF buf = { 0, 0 }; + start_receive_op(impl, &buf, 1, flags, false, op); + } + else + { + start_reactor_op(impl, + (flags & socket_base::message_out_of_band) + ? 
select_reactor::except_op : select_reactor::read_op, + op); + } +} + +void win_iocp_socket_service_base::start_receive_from_op( + win_iocp_socket_service_base::base_implementation_type& impl, + WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr, + socket_base::message_flags flags, int* addrlen, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (!is_open(impl)) + iocp_service_.on_completion(op, asio::error::bad_descriptor); + else + { + DWORD bytes_transferred = 0; + DWORD recv_flags = flags; + int result = ::WSARecvFrom(impl.socket_, buffers, + static_cast<DWORD>(buffer_count), + &bytes_transferred, &recv_flags, addr, addrlen, op, 0); + DWORD last_error = ::WSAGetLastError(); + if (last_error == ERROR_PORT_UNREACHABLE) + last_error = WSAECONNREFUSED; + if (result != 0 && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error, bytes_transferred); + else + iocp_service_.on_pending(op); + } +} + +void win_iocp_socket_service_base::start_accept_op( + win_iocp_socket_service_base::base_implementation_type& impl, + bool peer_is_open, socket_holder& new_socket, int family, int type, + int protocol, void* output_buffer, DWORD address_length, operation* op) +{ + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + if (!is_open(impl)) + iocp_service_.on_completion(op, asio::error::bad_descriptor); + else if (peer_is_open) + iocp_service_.on_completion(op, asio::error::already_open); + else + { + asio::error_code ec; + new_socket.reset(socket_ops::socket(family, type, protocol, ec)); + if (new_socket.get() == invalid_socket) + iocp_service_.on_completion(op, ec); + else + { + DWORD bytes_read = 0; + BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer, + 0, address_length, address_length, &bytes_read, op); + DWORD last_error = ::WSAGetLastError(); + if (!result && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error); + else + iocp_service_.on_pending(op); + } + } +} + +void win_iocp_socket_service_base::restart_accept_op( + socket_type s, socket_holder& new_socket, int family, int type, + int protocol, void* output_buffer, DWORD address_length, operation* op) +{ + new_socket.reset(); + iocp_service_.work_started(); + + asio::error_code ec; + new_socket.reset(socket_ops::socket(family, type, protocol, ec)); + if (new_socket.get() == invalid_socket) + iocp_service_.on_completion(op, ec); + else + { + DWORD bytes_read = 0; + BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer, + 0, address_length, address_length, &bytes_read, op); + DWORD last_error = ::WSAGetLastError(); + if (!result && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error); + else + iocp_service_.on_pending(op); + } +} + +void win_iocp_socket_service_base::start_reactor_op( + win_iocp_socket_service_base::base_implementation_type& impl, + int op_type, reactor_op* op) +{ + select_reactor& r = get_reactor(); + update_cancellation_thread_id(impl); + + if (is_open(impl)) + { + r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false); + return; + } + else + op->ec_ = asio::error::bad_descriptor; + + iocp_service_.post_immediate_completion(op, false); +} + +void win_iocp_socket_service_base::start_connect_op( + win_iocp_socket_service_base::base_implementation_type& impl, + int family, int type, const socket_addr_type* addr, + std::size_t addrlen, win_iocp_socket_connect_op_base* op) +{ + // If ConnectEx is available, use that. 
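+  // Note that ConnectEx requires the socket to be explicitly bound, which
+  // is why a bind to the wildcard address is performed below before the
+  // overlapped connect is started.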
+ if (family == ASIO_OS_DEF(AF_INET) + || family == ASIO_OS_DEF(AF_INET6)) + { + if (connect_ex_fn connect_ex = get_connect_ex(impl, type)) + { + union address_union + { + socket_addr_type base; + sockaddr_in4_type v4; + sockaddr_in6_type v6; + } a; + + using namespace std; // For memset. + memset(&a, 0, sizeof(a)); + a.base.sa_family = family; + + socket_ops::bind(impl.socket_, &a.base, + family == ASIO_OS_DEF(AF_INET) + ? sizeof(a.v4) : sizeof(a.v6), op->ec_); + if (op->ec_ && op->ec_ != asio::error::invalid_argument) + { + iocp_service_.post_immediate_completion(op, false); + return; + } + + op->connect_ex_ = true; + update_cancellation_thread_id(impl); + iocp_service_.work_started(); + + BOOL result = connect_ex(impl.socket_, + addr, static_cast<int>(addrlen), 0, 0, 0, op); + DWORD last_error = ::WSAGetLastError(); + if (!result && last_error != WSA_IO_PENDING) + iocp_service_.on_completion(op, last_error); + else + iocp_service_.on_pending(op); + return; + } + } + + // Otherwise, fall back to a reactor-based implementation. + select_reactor& r = get_reactor(); + update_cancellation_thread_id(impl); + + if ((impl.state_ & socket_ops::non_blocking) != 0 + || socket_ops::set_internal_non_blocking( + impl.socket_, impl.state_, true, op->ec_)) + { + if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) + { + if (op->ec_ == asio::error::in_progress + || op->ec_ == asio::error::would_block) + { + op->ec_ = asio::error_code(); + r.start_op(select_reactor::connect_op, impl.socket_, + impl.reactor_data_, op, false, false); + return; + } + } + } + + r.post_immediate_completion(op, false); +} + +void win_iocp_socket_service_base::close_for_destruction( + win_iocp_socket_service_base::base_implementation_type& impl) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((iocp_service_.context(), + "socket", &impl, impl.socket_, "close")); + + // Check if the reactor was created, in which case we need to close the + // socket on the reactor as well to cancel any operations that might be + // running there. 
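+    // The compare-and-swap below uses identical exchange and comparand
+    // values, so it never modifies reactor_; it simply serves as an atomic
+    // read of the pointer.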
+ select_reactor* r = static_cast<select_reactor*>( + interlocked_compare_exchange_pointer( + reinterpret_cast<void**>(&reactor_), 0, 0)); + if (r) + r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); + + asio::error_code ignored_ec; + socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); + + if (r) + r->cleanup_descriptor_data(impl.reactor_data_); + } + + impl.socket_ = invalid_socket; + impl.state_ = 0; + impl.cancel_token_.reset(); +#if defined(ASIO_ENABLE_CANCELIO) + impl.safe_cancellation_thread_id_ = 0; +#endif // defined(ASIO_ENABLE_CANCELIO) +} + +void win_iocp_socket_service_base::update_cancellation_thread_id( + win_iocp_socket_service_base::base_implementation_type& impl) +{ +#if defined(ASIO_ENABLE_CANCELIO) + if (impl.safe_cancellation_thread_id_ == 0) + impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); + else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) + impl.safe_cancellation_thread_id_ = ~DWORD(0); +#else // defined(ASIO_ENABLE_CANCELIO) + (void)impl; +#endif // defined(ASIO_ENABLE_CANCELIO) +} + +select_reactor& win_iocp_socket_service_base::get_reactor() +{ + select_reactor* r = static_cast<select_reactor*>( + interlocked_compare_exchange_pointer( + reinterpret_cast<void**>(&reactor_), 0, 0)); + if (!r) + { + r = &(use_service<select_reactor>(io_context_)); + interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r); + } + return *r; +} + +win_iocp_socket_service_base::connect_ex_fn +win_iocp_socket_service_base::get_connect_ex( + win_iocp_socket_service_base::base_implementation_type& impl, int type) +{ +#if defined(ASIO_DISABLE_CONNECTEX) + (void)impl; + (void)type; + return 0; +#else // defined(ASIO_DISABLE_CONNECTEX) + if (type != ASIO_OS_DEF(SOCK_STREAM) + && type != ASIO_OS_DEF(SOCK_SEQPACKET)) + return 0; + + void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0); + if (!ptr) + { + GUID guid = { 0x25a207b9, 0xddf3, 0x4660, + { 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } }; + + DWORD bytes = 0; + if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER, + &guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0) + { + // Set connect_ex_ to a special value to indicate that ConnectEx is + // unavailable. That way we won't bother trying to look it up again. + ptr = this; + } + + interlocked_exchange_pointer(&connect_ex_, ptr); + } + + return reinterpret_cast<connect_ex_fn>(ptr == this ? 0 : ptr); +#endif // defined(ASIO_DISABLE_CONNECTEX) +} + +win_iocp_socket_service_base::nt_set_info_fn +win_iocp_socket_service_base::get_nt_set_info() +{ + void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0); + if (!ptr) + { + if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL")) + ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile")); + + // On failure, set nt_set_info_ to a special value to indicate that the + // NtSetInformationFile function is unavailable. That way we won't bother + // trying to look it up again. + interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this); + } + + return reinterpret_cast<nt_set_info_fn>(ptr == this ? 
0 : ptr); +} + +void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer( + void** dest, void* exch, void* cmp) +{ +#if defined(_M_IX86) + return reinterpret_cast<void*>(InterlockedCompareExchange( + reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch), + reinterpret_cast<LONG>(cmp))); +#else + return InterlockedCompareExchangePointer(dest, exch, cmp); +#endif +} + +void* win_iocp_socket_service_base::interlocked_exchange_pointer( + void** dest, void* val) +{ +#if defined(_M_IX86) + return reinterpret_cast<void*>(InterlockedExchange( + reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val))); +#else + return InterlockedExchangePointer(dest, val); +#endif +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_IOCP) + +#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP diff --git a/lib/asio/detail/impl/win_mutex.ipp b/lib/asio/detail/impl/win_mutex.ipp new file mode 100644 index 0000000..dc58a12 --- /dev/null +++ b/lib/asio/detail/impl/win_mutex.ipp @@ -0,0 +1,84 @@ +// +// detail/impl/win_mutex.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP +#define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS) + +#include "asio/detail/throw_error.hpp" +#include "asio/detail/win_mutex.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +win_mutex::win_mutex() +{ + int error = do_init(); + asio::error_code ec(error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "mutex"); +} + +int win_mutex::do_init() +{ +#if defined(__MINGW32__) + // Not sure if MinGW supports structured exception handling, so for now + // we'll just call the Windows API and hope. +# if defined(UNDER_CE) + ::InitializeCriticalSection(&crit_section_); +# elif defined(ASIO_WINDOWS_APP) + if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) + return ::GetLastError(); +# else + if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) + return ::GetLastError(); +# endif + return 0; +#else + __try + { +# if defined(UNDER_CE) + ::InitializeCriticalSection(&crit_section_); +# elif defined(ASIO_WINDOWS_APP) + if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) + return ::GetLastError(); +# else + if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) + return ::GetLastError(); +# endif + } + __except(GetExceptionCode() == STATUS_NO_MEMORY + ? 
EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) + { + return ERROR_OUTOFMEMORY; + } + + return 0; +#endif +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS) + +#endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP diff --git a/lib/asio/detail/impl/win_object_handle_service.ipp b/lib/asio/detail/impl/win_object_handle_service.ipp new file mode 100644 index 0000000..c5e59d1 --- /dev/null +++ b/lib/asio/detail/impl/win_object_handle_service.ipp @@ -0,0 +1,449 @@ +// +// detail/impl/win_object_handle_service.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// Copyright (c) 2011 Boris Schaeling (boris@highscore.de) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP +#define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) + +#include "asio/detail/win_object_handle_service.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +win_object_handle_service::win_object_handle_service( + asio::io_context& io_context) + : service_base<win_object_handle_service>(io_context), + io_context_(asio::use_service<io_context_impl>(io_context)), + mutex_(), + impl_list_(0), + shutdown_(false) +{ +} + +void win_object_handle_service::shutdown() +{ + mutex::scoped_lock lock(mutex_); + + // Setting this flag to true prevents new objects from being registered, and + // new asynchronous wait operations from being started. We only need to worry + // about cleaning up the operations that are currently in progress. + shutdown_ = true; + + op_queue<operation> ops; + for (implementation_type* impl = impl_list_; impl; impl = impl->next_) + ops.push(impl->op_queue_); + + lock.unlock(); + + io_context_.abandon_operations(ops); +} + +void win_object_handle_service::construct( + win_object_handle_service::implementation_type& impl) +{ + impl.handle_ = INVALID_HANDLE_VALUE; + impl.wait_handle_ = INVALID_HANDLE_VALUE; + impl.owner_ = this; + + // Insert implementation into linked list of all implementations. + mutex::scoped_lock lock(mutex_); + if (!shutdown_) + { + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; + } +} + +void win_object_handle_service::move_construct( + win_object_handle_service::implementation_type& impl, + win_object_handle_service::implementation_type& other_impl) +{ + mutex::scoped_lock lock(mutex_); + + // Insert implementation into linked list of all implementations. + if (!shutdown_) + { + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; + } + + impl.handle_ = other_impl.handle_; + other_impl.handle_ = INVALID_HANDLE_VALUE; + impl.wait_handle_ = other_impl.wait_handle_; + other_impl.wait_handle_ = INVALID_HANDLE_VALUE; + impl.op_queue_.push(other_impl.op_queue_); + impl.owner_ = this; + + // We must not hold the lock while calling UnregisterWaitEx. This is because + // the registered callback function might be invoked while we are waiting for + // UnregisterWaitEx to complete. 
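+  // Passing INVALID_HANDLE_VALUE as the completion event below makes
+  // UnregisterWaitEx block until any in-flight callback has returned.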
+ lock.unlock(); + + if (impl.wait_handle_ != INVALID_HANDLE_VALUE) + ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); + + if (!impl.op_queue_.empty()) + register_wait_callback(impl, lock); +} + +void win_object_handle_service::move_assign( + win_object_handle_service::implementation_type& impl, + win_object_handle_service& other_service, + win_object_handle_service::implementation_type& other_impl) +{ + asio::error_code ignored_ec; + close(impl, ignored_ec); + + mutex::scoped_lock lock(mutex_); + + if (this != &other_service) + { + // Remove implementation from linked list of all implementations. + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; + } + + impl.handle_ = other_impl.handle_; + other_impl.handle_ = INVALID_HANDLE_VALUE; + impl.wait_handle_ = other_impl.wait_handle_; + other_impl.wait_handle_ = INVALID_HANDLE_VALUE; + impl.op_queue_.push(other_impl.op_queue_); + impl.owner_ = this; + + if (this != &other_service) + { + // Insert implementation into linked list of all implementations. + impl.next_ = other_service.impl_list_; + impl.prev_ = 0; + if (other_service.impl_list_) + other_service.impl_list_->prev_ = &impl; + other_service.impl_list_ = &impl; + } + + // We must not hold the lock while calling UnregisterWaitEx. This is because + // the registered callback function might be invoked while we are waiting for + // UnregisterWaitEx to complete. + lock.unlock(); + + if (impl.wait_handle_ != INVALID_HANDLE_VALUE) + ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); + + if (!impl.op_queue_.empty()) + register_wait_callback(impl, lock); +} + +void win_object_handle_service::destroy( + win_object_handle_service::implementation_type& impl) +{ + mutex::scoped_lock lock(mutex_); + + // Remove implementation from linked list of all implementations. + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; + + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((io_context_.context(), "object_handle", + &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close")); + + HANDLE wait_handle = impl.wait_handle_; + impl.wait_handle_ = INVALID_HANDLE_VALUE; + + op_queue<operation> ops; + while (wait_op* op = impl.op_queue_.front()) + { + op->ec_ = asio::error::operation_aborted; + impl.op_queue_.pop(); + ops.push(op); + } + + // We must not hold the lock while calling UnregisterWaitEx. This is + // because the registered callback function might be invoked while we are + // waiting for UnregisterWaitEx to complete. 
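+    // The wait handle and the queued operations were detached from the
+    // implementation while the lock was held, so a callback that fires
+    // before UnregisterWaitEx completes will find nothing left to do.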
+ lock.unlock(); + + if (wait_handle != INVALID_HANDLE_VALUE) + ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); + + ::CloseHandle(impl.handle_); + impl.handle_ = INVALID_HANDLE_VALUE; + + io_context_.post_deferred_completions(ops); + } +} + +asio::error_code win_object_handle_service::assign( + win_object_handle_service::implementation_type& impl, + const native_handle_type& handle, asio::error_code& ec) +{ + if (is_open(impl)) + { + ec = asio::error::already_open; + return ec; + } + + impl.handle_ = handle; + ec = asio::error_code(); + return ec; +} + +asio::error_code win_object_handle_service::close( + win_object_handle_service::implementation_type& impl, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((io_context_.context(), "object_handle", + &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close")); + + mutex::scoped_lock lock(mutex_); + + HANDLE wait_handle = impl.wait_handle_; + impl.wait_handle_ = INVALID_HANDLE_VALUE; + + op_queue<operation> completed_ops; + while (wait_op* op = impl.op_queue_.front()) + { + impl.op_queue_.pop(); + op->ec_ = asio::error::operation_aborted; + completed_ops.push(op); + } + + // We must not hold the lock while calling UnregisterWaitEx. This is + // because the registered callback function might be invoked while we are + // waiting for UnregisterWaitEx to complete. + lock.unlock(); + + if (wait_handle != INVALID_HANDLE_VALUE) + ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); + + if (::CloseHandle(impl.handle_)) + { + impl.handle_ = INVALID_HANDLE_VALUE; + ec = asio::error_code(); + } + else + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + } + + io_context_.post_deferred_completions(completed_ops); + } + else + { + ec = asio::error_code(); + } + + return ec; +} + +asio::error_code win_object_handle_service::cancel( + win_object_handle_service::implementation_type& impl, + asio::error_code& ec) +{ + if (is_open(impl)) + { + ASIO_HANDLER_OPERATION((io_context_.context(), "object_handle", + &impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "cancel")); + + mutex::scoped_lock lock(mutex_); + + HANDLE wait_handle = impl.wait_handle_; + impl.wait_handle_ = INVALID_HANDLE_VALUE; + + op_queue<operation> completed_ops; + while (wait_op* op = impl.op_queue_.front()) + { + op->ec_ = asio::error::operation_aborted; + impl.op_queue_.pop(); + completed_ops.push(op); + } + + // We must not hold the lock while calling UnregisterWaitEx. This is + // because the registered callback function might be invoked while we are + // waiting for UnregisterWaitEx to complete. 
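+    // Unlike close(), cancellation leaves the native handle open, so new
+    // wait operations may be started on the object afterwards.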
+ lock.unlock(); + + if (wait_handle != INVALID_HANDLE_VALUE) + ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); + + ec = asio::error_code(); + + io_context_.post_deferred_completions(completed_ops); + } + else + { + ec = asio::error::bad_descriptor; + } + + return ec; +} + +void win_object_handle_service::wait( + win_object_handle_service::implementation_type& impl, + asio::error_code& ec) +{ + switch (::WaitForSingleObject(impl.handle_, INFINITE)) + { + case WAIT_FAILED: + { + DWORD last_error = ::GetLastError(); + ec = asio::error_code(last_error, + asio::error::get_system_category()); + break; + } + case WAIT_OBJECT_0: + case WAIT_ABANDONED: + default: + ec = asio::error_code(); + break; + } +} + +void win_object_handle_service::start_wait_op( + win_object_handle_service::implementation_type& impl, wait_op* op) +{ + io_context_.work_started(); + + if (is_open(impl)) + { + mutex::scoped_lock lock(mutex_); + + if (!shutdown_) + { + impl.op_queue_.push(op); + + // Only the first operation to be queued gets to register a wait callback. + // Subsequent operations have to wait for the first to finish. + if (impl.op_queue_.front() == op) + register_wait_callback(impl, lock); + } + else + { + lock.unlock(); + io_context_.post_deferred_completion(op); + } + } + else + { + op->ec_ = asio::error::bad_descriptor; + io_context_.post_deferred_completion(op); + } +} + +void win_object_handle_service::register_wait_callback( + win_object_handle_service::implementation_type& impl, + mutex::scoped_lock& lock) +{ + lock.lock(); + + if (!RegisterWaitForSingleObject(&impl.wait_handle_, + impl.handle_, &win_object_handle_service::wait_callback, + &impl, INFINITE, WT_EXECUTEONLYONCE)) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + + op_queue<operation> completed_ops; + while (wait_op* op = impl.op_queue_.front()) + { + op->ec_ = ec; + impl.op_queue_.pop(); + completed_ops.push(op); + } + + lock.unlock(); + io_context_.post_deferred_completions(completed_ops); + } +} + +void win_object_handle_service::wait_callback(PVOID param, BOOLEAN) +{ + implementation_type* impl = static_cast<implementation_type*>(param); + mutex::scoped_lock lock(impl->owner_->mutex_); + + if (impl->wait_handle_ != INVALID_HANDLE_VALUE) + { + ::UnregisterWaitEx(impl->wait_handle_, NULL); + impl->wait_handle_ = INVALID_HANDLE_VALUE; + } + + if (wait_op* op = impl->op_queue_.front()) + { + op_queue<operation> completed_ops; + + op->ec_ = asio::error_code(); + impl->op_queue_.pop(); + completed_ops.push(op); + + if (!impl->op_queue_.empty()) + { + if (!RegisterWaitForSingleObject(&impl->wait_handle_, + impl->handle_, &win_object_handle_service::wait_callback, + param, INFINITE, WT_EXECUTEONLYONCE)) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + + while ((op = impl->op_queue_.front()) != 0) + { + op->ec_ = ec; + impl->op_queue_.pop(); + completed_ops.push(op); + } + } + } + + io_context_impl& ioc = impl->owner_->io_context_; + lock.unlock(); + ioc.post_deferred_completions(completed_ops); + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) + +#endif // ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP diff --git a/lib/asio/detail/impl/win_static_mutex.ipp b/lib/asio/detail/impl/win_static_mutex.ipp new file mode 100644 index 0000000..42089bd --- /dev/null +++ b/lib/asio/detail/impl/win_static_mutex.ipp 
@@ -0,0 +1,136 @@ +// +// detail/impl/win_static_mutex.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP +#define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS) + +#include <cstdio> +#include "asio/detail/throw_error.hpp" +#include "asio/detail/win_static_mutex.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +void win_static_mutex::init() +{ + int error = do_init(); + asio::error_code ec(error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "static_mutex"); +} + +int win_static_mutex::do_init() +{ + using namespace std; // For sprintf. + wchar_t mutex_name[128]; +#if defined(ASIO_HAS_SECURE_RTL) + swprintf_s( +#else // defined(ASIO_HAS_SECURE_RTL) + _snwprintf( +#endif // defined(ASIO_HAS_SECURE_RTL) + mutex_name, 128, L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p", + static_cast<unsigned int>(::GetCurrentProcessId()), this); + +#if defined(ASIO_WINDOWS_APP) + HANDLE mutex = ::CreateMutexExW(0, mutex_name, CREATE_MUTEX_INITIAL_OWNER, 0); +#else // defined(ASIO_WINDOWS_APP) + HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name); +#endif // defined(ASIO_WINDOWS_APP) + DWORD last_error = ::GetLastError(); + if (mutex == 0) + return ::GetLastError(); + + if (last_error == ERROR_ALREADY_EXISTS) + { +#if defined(ASIO_WINDOWS_APP) + ::WaitForSingleObjectEx(mutex, INFINITE, false); +#else // defined(ASIO_WINDOWS_APP) + ::WaitForSingleObject(mutex, INFINITE); +#endif // defined(ASIO_WINDOWS_APP) + } + + if (initialised_) + { + ::ReleaseMutex(mutex); + ::CloseHandle(mutex); + return 0; + } + +#if defined(__MINGW32__) + // Not sure if MinGW supports structured exception handling, so for now + // we'll just call the Windows API and hope. +# if defined(UNDER_CE) + ::InitializeCriticalSection(&crit_section_); +# else + if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) + { + last_error = ::GetLastError(); + ::ReleaseMutex(mutex); + ::CloseHandle(mutex); + return last_error; + } +# endif +#else + __try + { +# if defined(UNDER_CE) + ::InitializeCriticalSection(&crit_section_); +# elif defined(ASIO_WINDOWS_APP) + if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) + { + last_error = ::GetLastError(); + ::ReleaseMutex(mutex); + ::CloseHandle(mutex); + return last_error; + } +# else + if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) + { + last_error = ::GetLastError(); + ::ReleaseMutex(mutex); + ::CloseHandle(mutex); + return last_error; + } +# endif + } + __except(GetExceptionCode() == STATUS_NO_MEMORY + ? 
EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) + { + ::ReleaseMutex(mutex); + ::CloseHandle(mutex); + return ERROR_OUTOFMEMORY; + } +#endif + + initialised_ = true; + ::ReleaseMutex(mutex); + ::CloseHandle(mutex); + return 0; +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS) + +#endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP diff --git a/lib/asio/detail/impl/win_thread.ipp b/lib/asio/detail/impl/win_thread.ipp new file mode 100644 index 0000000..ed6dbaf --- /dev/null +++ b/lib/asio/detail/impl/win_thread.ipp @@ -0,0 +1,150 @@ +// +// detail/impl/win_thread.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP +#define ASIO_DETAIL_IMPL_WIN_THREAD_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS) \ + && !defined(ASIO_WINDOWS_APP) \ + && !defined(UNDER_CE) + +#include <process.h> +#include "asio/detail/throw_error.hpp" +#include "asio/detail/win_thread.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +win_thread::~win_thread() +{ + ::CloseHandle(thread_); + + // The exit_event_ handle is deliberately allowed to leak here since it + // is an error for the owner of an internal thread not to join() it. +} + +void win_thread::join() +{ + HANDLE handles[2] = { exit_event_, thread_ }; + ::WaitForMultipleObjects(2, handles, FALSE, INFINITE); + ::CloseHandle(exit_event_); + if (terminate_threads()) + { + ::TerminateThread(thread_, 0); + } + else + { + ::QueueUserAPC(apc_function, thread_, 0); + ::WaitForSingleObject(thread_, INFINITE); + } +} + +std::size_t win_thread::hardware_concurrency() +{ + SYSTEM_INFO system_info; + ::GetSystemInfo(&system_info); + return system_info.dwNumberOfProcessors; +} + +void win_thread::start_thread(func_base* arg, unsigned int stack_size) +{ + ::HANDLE entry_event = 0; + arg->entry_event_ = entry_event = ::CreateEventW(0, true, false, 0); + if (!entry_event) + { + DWORD last_error = ::GetLastError(); + delete arg; + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "thread.entry_event"); + } + + arg->exit_event_ = exit_event_ = ::CreateEventW(0, true, false, 0); + if (!exit_event_) + { + DWORD last_error = ::GetLastError(); + delete arg; + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "thread.exit_event"); + } + + unsigned int thread_id = 0; + thread_ = reinterpret_cast<HANDLE>(::_beginthreadex(0, + stack_size, win_thread_function, arg, 0, &thread_id)); + if (!thread_) + { + DWORD last_error = ::GetLastError(); + delete arg; + if (entry_event) + ::CloseHandle(entry_event); + if (exit_event_) + ::CloseHandle(exit_event_); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "thread"); + } + + if (entry_event) + { + ::WaitForSingleObject(entry_event, INFINITE); + ::CloseHandle(entry_event); + } +} + +unsigned int __stdcall win_thread_function(void* arg) +{ + win_thread::auto_func_base_ptr func = { + static_cast<win_thread::func_base*>(arg) }; + + 
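+  // Release the creating thread, which is blocked in start_thread() until
+  // this thread has taken ownership of the func_base object.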
::SetEvent(func.ptr->entry_event_); + + func.ptr->run(); + + // Signal that the thread has finished its work, but rather than returning go + // to sleep to put the thread into a well known state. If the thread is being + // joined during global object destruction then it may be killed using + // TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx + // call will be interrupted using QueueUserAPC and the thread will shut down + // cleanly. + HANDLE exit_event = func.ptr->exit_event_; + delete func.ptr; + func.ptr = 0; + ::SetEvent(exit_event); + ::SleepEx(INFINITE, TRUE); + + return 0; +} + +#if defined(WINVER) && (WINVER < 0x0500) +void __stdcall apc_function(ULONG) {} +#else +void __stdcall apc_function(ULONG_PTR) {} +#endif + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS) + // && !defined(ASIO_WINDOWS_APP) + // && !defined(UNDER_CE) + +#endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP diff --git a/lib/asio/detail/impl/win_tss_ptr.ipp b/lib/asio/detail/impl/win_tss_ptr.ipp new file mode 100644 index 0000000..61df0ce --- /dev/null +++ b/lib/asio/detail/impl/win_tss_ptr.ipp @@ -0,0 +1,57 @@ +// +// detail/impl/win_tss_ptr.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP +#define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS) + +#include "asio/detail/throw_error.hpp" +#include "asio/detail/win_tss_ptr.hpp" +#include "asio/error.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +DWORD win_tss_ptr_create() +{ +#if defined(UNDER_CE) + const DWORD out_of_indexes = 0xFFFFFFFF; +#else + const DWORD out_of_indexes = TLS_OUT_OF_INDEXES; +#endif + + DWORD tss_key = ::TlsAlloc(); + if (tss_key == out_of_indexes) + { + DWORD last_error = ::GetLastError(); + asio::error_code ec(last_error, + asio::error::get_system_category()); + asio::detail::throw_error(ec, "tss"); + } + return tss_key; +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS) + +#endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP diff --git a/lib/asio/detail/impl/winrt_ssocket_service_base.ipp b/lib/asio/detail/impl/winrt_ssocket_service_base.ipp new file mode 100644 index 0000000..288ca8b --- /dev/null +++ b/lib/asio/detail/impl/winrt_ssocket_service_base.ipp @@ -0,0 +1,629 @@ +// +// detail/impl/winrt_ssocket_service_base.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP +#define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS_RUNTIME) + +#include <cstring> +#include "asio/detail/winrt_ssocket_service_base.hpp" +#include "asio/detail/winrt_async_op.hpp" +#include "asio/detail/winrt_utils.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +winrt_ssocket_service_base::winrt_ssocket_service_base( + asio::io_context& io_context) + : io_context_(use_service<io_context_impl>(io_context)), + async_manager_(use_service<winrt_async_manager>(io_context)), + mutex_(), + impl_list_(0) +{ +} + +void winrt_ssocket_service_base::base_shutdown() +{ + // Close all implementations, causing all operations to complete. + asio::detail::mutex::scoped_lock lock(mutex_); + base_implementation_type* impl = impl_list_; + while (impl) + { + asio::error_code ignored_ec; + close(*impl, ignored_ec); + impl = impl->next_; + } +} + +void winrt_ssocket_service_base::construct( + winrt_ssocket_service_base::base_implementation_type& impl) +{ + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; +} + +void winrt_ssocket_service_base::base_move_construct( + winrt_ssocket_service_base::base_implementation_type& impl, + winrt_ssocket_service_base::base_implementation_type& other_impl) +{ + impl.socket_ = other_impl.socket_; + other_impl.socket_ = nullptr; + + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + impl.next_ = impl_list_; + impl.prev_ = 0; + if (impl_list_) + impl_list_->prev_ = &impl; + impl_list_ = &impl; +} + +void winrt_ssocket_service_base::base_move_assign( + winrt_ssocket_service_base::base_implementation_type& impl, + winrt_ssocket_service_base& other_service, + winrt_ssocket_service_base::base_implementation_type& other_impl) +{ + asio::error_code ignored_ec; + close(impl, ignored_ec); + + if (this != &other_service) + { + // Remove implementation from linked list of all implementations. + asio::detail::mutex::scoped_lock lock(mutex_); + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; + } + + impl.socket_ = other_impl.socket_; + other_impl.socket_ = nullptr; + + if (this != &other_service) + { + // Insert implementation into linked list of all implementations. + asio::detail::mutex::scoped_lock lock(other_service.mutex_); + impl.next_ = other_service.impl_list_; + impl.prev_ = 0; + if (other_service.impl_list_) + other_service.impl_list_->prev_ = &impl; + other_service.impl_list_ = &impl; + } +} + +void winrt_ssocket_service_base::destroy( + winrt_ssocket_service_base::base_implementation_type& impl) +{ + asio::error_code ignored_ec; + close(impl, ignored_ec); + + // Remove implementation from linked list of all implementations. 
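+  // The intrusive doubly-linked list lets base_shutdown() visit every open
+  // implementation without further allocation; insertions and removals are
+  // serialised by the service mutex.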
+ asio::detail::mutex::scoped_lock lock(mutex_); + if (impl_list_ == &impl) + impl_list_ = impl.next_; + if (impl.prev_) + impl.prev_->next_ = impl.next_; + if (impl.next_) + impl.next_->prev_= impl.prev_; + impl.next_ = 0; + impl.prev_ = 0; +} + +asio::error_code winrt_ssocket_service_base::close( + winrt_ssocket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (impl.socket_) + { + delete impl.socket_; + impl.socket_ = nullptr; + } + + ec = asio::error_code(); + return ec; +} + +winrt_ssocket_service_base::native_handle_type +winrt_ssocket_service_base::release( + winrt_ssocket_service_base::base_implementation_type& impl, + asio::error_code& ec) +{ + if (!is_open(impl)) + return nullptr; + + cancel(impl, ec); + if (ec) + return nullptr; + + native_handle_type tmp = impl.socket_; + impl.socket_ = nullptr; + return tmp; +} + +std::size_t winrt_ssocket_service_base::do_get_endpoint( + const base_implementation_type& impl, bool local, + void* addr, std::size_t addr_len, asio::error_code& ec) const +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return addr_len; + } + + try + { + std::string addr_string = winrt_utils::string(local + ? impl.socket_->Information->LocalAddress->CanonicalName + : impl.socket_->Information->RemoteAddress->CanonicalName); + unsigned short port = winrt_utils::integer(local + ? impl.socket_->Information->LocalPort + : impl.socket_->Information->RemotePort); + unsigned long scope = 0; + + switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family) + { + case ASIO_OS_DEF(AF_INET): + if (addr_len < sizeof(sockaddr_in4_type)) + { + ec = asio::error::invalid_argument; + return addr_len; + } + else + { + socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(), + &reinterpret_cast<sockaddr_in4_type*>(addr)->sin_addr, &scope, ec); + reinterpret_cast<sockaddr_in4_type*>(addr)->sin_port + = socket_ops::host_to_network_short(port); + ec = asio::error_code(); + return sizeof(sockaddr_in4_type); + } + case ASIO_OS_DEF(AF_INET6): + if (addr_len < sizeof(sockaddr_in6_type)) + { + ec = asio::error::invalid_argument; + return addr_len; + } + else + { + socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(), + &reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_addr, &scope, ec); + reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_port + = socket_ops::host_to_network_short(port); + ec = asio::error_code(); + return sizeof(sockaddr_in6_type); + } + default: + ec = asio::error::address_family_not_supported; + return addr_len; + } + } + catch (Platform::Exception^ e) + { + ec = asio::error_code(e->HResult, + asio::system_category()); + return addr_len; + } +} + +asio::error_code winrt_ssocket_service_base::do_set_option( + winrt_ssocket_service_base::base_implementation_type& impl, + int level, int optname, const void* optval, + std::size_t optlen, asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return ec; + } + + try + { + if (level == ASIO_OS_DEF(SOL_SOCKET) + && optname == ASIO_OS_DEF(SO_KEEPALIVE)) + { + if (optlen == sizeof(int)) + { + int value = 0; + std::memcpy(&value, optval, optlen); + impl.socket_->Control->KeepAlive = !!value; + ec = asio::error_code(); + } + else + { + ec = asio::error::invalid_argument; + } + } + else if (level == ASIO_OS_DEF(IPPROTO_TCP) + && optname == ASIO_OS_DEF(TCP_NODELAY)) + { + if (optlen == sizeof(int)) + { + int value = 0; + std::memcpy(&value, optval, optlen); + impl.socket_->Control->NoDelay = !!value; + ec = asio::error_code(); + } + 
else + { + ec = asio::error::invalid_argument; + } + } + else + { + ec = asio::error::invalid_argument; + } + } + catch (Platform::Exception^ e) + { + ec = asio::error_code(e->HResult, + asio::system_category()); + } + + return ec; +} + +void winrt_ssocket_service_base::do_get_option( + const winrt_ssocket_service_base::base_implementation_type& impl, + int level, int optname, void* optval, + std::size_t* optlen, asio::error_code& ec) const +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return; + } + + try + { + if (level == ASIO_OS_DEF(SOL_SOCKET) + && optname == ASIO_OS_DEF(SO_KEEPALIVE)) + { + if (*optlen >= sizeof(int)) + { + int value = impl.socket_->Control->KeepAlive ? 1 : 0; + std::memcpy(optval, &value, sizeof(int)); + *optlen = sizeof(int); + ec = asio::error_code(); + } + else + { + ec = asio::error::invalid_argument; + } + } + else if (level == ASIO_OS_DEF(IPPROTO_TCP) + && optname == ASIO_OS_DEF(TCP_NODELAY)) + { + if (*optlen >= sizeof(int)) + { + int value = impl.socket_->Control->NoDelay ? 1 : 0; + std::memcpy(optval, &value, sizeof(int)); + *optlen = sizeof(int); + ec = asio::error_code(); + } + else + { + ec = asio::error::invalid_argument; + } + } + else + { + ec = asio::error::invalid_argument; + } + } + catch (Platform::Exception^ e) + { + ec = asio::error_code(e->HResult, + asio::system_category()); + } +} + +asio::error_code winrt_ssocket_service_base::do_connect( + winrt_ssocket_service_base::base_implementation_type& impl, + const void* addr, asio::error_code& ec) +{ + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return ec; + } + + char addr_string[max_addr_v6_str_len]; + unsigned short port; + switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family) + { + case ASIO_OS_DEF(AF_INET): + socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), + &reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr, + addr_string, sizeof(addr_string), 0, ec); + port = socket_ops::network_to_host_short( + reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port); + break; + case ASIO_OS_DEF(AF_INET6): + socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), + &reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr, + addr_string, sizeof(addr_string), 0, ec); + port = socket_ops::network_to_host_short( + reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port); + break; + default: + ec = asio::error::address_family_not_supported; + return ec; + } + + if (!ec) try + { + async_manager_.sync(impl.socket_->ConnectAsync( + ref new Windows::Networking::HostName( + winrt_utils::string(addr_string)), + winrt_utils::string(port)), ec); + } + catch (Platform::Exception^ e) + { + ec = asio::error_code(e->HResult, + asio::system_category()); + } + + return ec; +} + +void winrt_ssocket_service_base::start_connect_op( + winrt_ssocket_service_base::base_implementation_type& impl, + const void* addr, winrt_async_op<void>* op, bool is_continuation) +{ + if (!is_open(impl)) + { + op->ec_ = asio::error::bad_descriptor; + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + char addr_string[max_addr_v6_str_len]; + unsigned short port = 0; + switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family) + { + case ASIO_OS_DEF(AF_INET): + socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), + &reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr, + addr_string, sizeof(addr_string), 0, op->ec_); + port = socket_ops::network_to_host_short( + reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port); + break; + case ASIO_OS_DEF(AF_INET6): + 
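+      // The WinRT HostName API accepts only textual addresses, so the
+      // binary sockaddr is converted back to its string form before
+      // connecting.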
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), + &reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr, + addr_string, sizeof(addr_string), 0, op->ec_); + port = socket_ops::network_to_host_short( + reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port); + break; + default: + op->ec_ = asio::error::address_family_not_supported; + break; + } + + if (op->ec_) + { + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + try + { + async_manager_.async(impl.socket_->ConnectAsync( + ref new Windows::Networking::HostName( + winrt_utils::string(addr_string)), + winrt_utils::string(port)), op); + } + catch (Platform::Exception^ e) + { + op->ec_ = asio::error_code( + e->HResult, asio::system_category()); + io_context_.post_immediate_completion(op, is_continuation); + } +} + +std::size_t winrt_ssocket_service_base::do_send( + winrt_ssocket_service_base::base_implementation_type& impl, + const asio::const_buffer& data, + socket_base::message_flags flags, asio::error_code& ec) +{ + if (flags) + { + ec = asio::error::operation_not_supported; + return 0; + } + + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return 0; + } + + try + { + buffer_sequence_adapter<asio::const_buffer, + asio::const_buffer> bufs(asio::buffer(data)); + + if (bufs.all_empty()) + { + ec = asio::error_code(); + return 0; + } + + return async_manager_.sync( + impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec); + } + catch (Platform::Exception^ e) + { + ec = asio::error_code(e->HResult, + asio::system_category()); + return 0; + } +} + +void winrt_ssocket_service_base::start_send_op( + winrt_ssocket_service_base::base_implementation_type& impl, + const asio::const_buffer& data, socket_base::message_flags flags, + winrt_async_op<unsigned int>* op, bool is_continuation) +{ + if (flags) + { + op->ec_ = asio::error::operation_not_supported; + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + if (!is_open(impl)) + { + op->ec_ = asio::error::bad_descriptor; + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + try + { + buffer_sequence_adapter<asio::const_buffer, + asio::const_buffer> bufs(asio::buffer(data)); + + if (bufs.all_empty()) + { + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + async_manager_.async( + impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op); + } + catch (Platform::Exception^ e) + { + op->ec_ = asio::error_code(e->HResult, + asio::system_category()); + io_context_.post_immediate_completion(op, is_continuation); + } +} + +std::size_t winrt_ssocket_service_base::do_receive( + winrt_ssocket_service_base::base_implementation_type& impl, + const asio::mutable_buffer& data, + socket_base::message_flags flags, asio::error_code& ec) +{ + if (flags) + { + ec = asio::error::operation_not_supported; + return 0; + } + + if (!is_open(impl)) + { + ec = asio::error::bad_descriptor; + return 0; + } + + try + { + buffer_sequence_adapter<asio::mutable_buffer, + asio::mutable_buffer> bufs(asio::buffer(data)); + + if (bufs.all_empty()) + { + ec = asio::error_code(); + return 0; + } + + async_manager_.sync( + impl.socket_->InputStream->ReadAsync( + bufs.buffers()[0], bufs.buffers()[0]->Capacity, + Windows::Storage::Streams::InputStreamOptions::Partial), ec); + + std::size_t bytes_transferred = bufs.buffers()[0]->Length; + if (bytes_transferred == 0 && !ec) + { + ec = asio::error::eof; + } + + return bytes_transferred; + } + catch (Platform::Exception^ e) + { + ec = 
asio::error_code(e->HResult, + asio::system_category()); + return 0; + } +} + +void winrt_ssocket_service_base::start_receive_op( + winrt_ssocket_service_base::base_implementation_type& impl, + const asio::mutable_buffer& data, socket_base::message_flags flags, + winrt_async_op<Windows::Storage::Streams::IBuffer^>* op, + bool is_continuation) +{ + if (flags) + { + op->ec_ = asio::error::operation_not_supported; + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + if (!is_open(impl)) + { + op->ec_ = asio::error::bad_descriptor; + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + try + { + buffer_sequence_adapter<asio::mutable_buffer, + asio::mutable_buffer> bufs(asio::buffer(data)); + + if (bufs.all_empty()) + { + io_context_.post_immediate_completion(op, is_continuation); + return; + } + + async_manager_.async( + impl.socket_->InputStream->ReadAsync( + bufs.buffers()[0], bufs.buffers()[0]->Capacity, + Windows::Storage::Streams::InputStreamOptions::Partial), op); + } + catch (Platform::Exception^ e) + { + op->ec_ = asio::error_code(e->HResult, + asio::system_category()); + io_context_.post_immediate_completion(op, is_continuation); + } +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP diff --git a/lib/asio/detail/impl/winrt_timer_scheduler.hpp b/lib/asio/detail/impl/winrt_timer_scheduler.hpp new file mode 100644 index 0000000..856378f --- /dev/null +++ b/lib/asio/detail/impl/winrt_timer_scheduler.hpp @@ -0,0 +1,92 @@ +// +// detail/impl/winrt_timer_scheduler.hpp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP +#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS_RUNTIME) + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +template <typename Time_Traits> +void winrt_timer_scheduler::add_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_add_timer_queue(queue); +} + +// Remove a timer queue from the reactor. 
+template <typename Time_Traits> +void winrt_timer_scheduler::remove_timer_queue(timer_queue<Time_Traits>& queue) +{ + do_remove_timer_queue(queue); +} + +template <typename Time_Traits> +void winrt_timer_scheduler::schedule_timer(timer_queue<Time_Traits>& queue, + const typename Time_Traits::time_type& time, + typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + + if (shutdown_) + { + io_context_.post_immediate_completion(op, false); + return; + } + + bool earliest = queue.enqueue_timer(time, timer, op); + io_context_.work_started(); + if (earliest) + event_.signal(lock); +} + +template <typename Time_Traits> +std::size_t winrt_timer_scheduler::cancel_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& timer, + std::size_t max_cancelled) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); + lock.unlock(); + io_context_.post_deferred_completions(ops); + return n; +} + +template <typename Time_Traits> +void winrt_timer_scheduler::move_timer(timer_queue<Time_Traits>& queue, + typename timer_queue<Time_Traits>::per_timer_data& to, + typename timer_queue<Time_Traits>::per_timer_data& from) +{ + asio::detail::mutex::scoped_lock lock(mutex_); + op_queue<operation> ops; + queue.cancel_timer(to, ops); + queue.move_timer(to, from); + lock.unlock(); + scheduler_.post_deferred_completions(ops); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP diff --git a/lib/asio/detail/impl/winrt_timer_scheduler.ipp b/lib/asio/detail/impl/winrt_timer_scheduler.ipp new file mode 100644 index 0000000..ef21399 --- /dev/null +++ b/lib/asio/detail/impl/winrt_timer_scheduler.ipp @@ -0,0 +1,122 @@ +// +// detail/impl/winrt_timer_scheduler.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP +#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP + +#if defined(_MSC_VER) && (_MSC_VER >= 1200) +# pragma once +#endif // defined(_MSC_VER) && (_MSC_VER >= 1200) + +#include "asio/detail/config.hpp" + +#if defined(ASIO_WINDOWS_RUNTIME) + +#include "asio/detail/bind_handler.hpp" +#include "asio/detail/winrt_timer_scheduler.hpp" + +#include "asio/detail/push_options.hpp" + +namespace asio { +namespace detail { + +winrt_timer_scheduler::winrt_timer_scheduler( + asio::io_context& io_context) + : asio::detail::service_base<winrt_timer_scheduler>(io_context), + io_context_(use_service<io_context_impl>(io_context)), + mutex_(), + event_(), + timer_queues_(), + thread_(0), + stop_thread_(false), + shutdown_(false) +{ + thread_ = new asio::detail::thread( + bind_handler(&winrt_timer_scheduler::call_run_thread, this)); +} + +winrt_timer_scheduler::~winrt_timer_scheduler() +{ + shutdown(); +} + +void winrt_timer_scheduler::shutdown() +{ + asio::detail::mutex::scoped_lock lock(mutex_); + shutdown_ = true; + stop_thread_ = true; + event_.signal(lock); + lock.unlock(); + + if (thread_) + { + thread_->join(); + delete thread_; + thread_ = 0; + } + + op_queue<operation> ops; + timer_queues_.get_all_timers(ops); + io_context_.abandon_operations(ops); +} + +void winrt_timer_scheduler::notify_fork(asio::io_context::fork_event) +{ +} + +void winrt_timer_scheduler::init_task() +{ +} + +void winrt_timer_scheduler::run_thread() +{ + asio::detail::mutex::scoped_lock lock(mutex_); + while (!stop_thread_) + { + const long max_wait_duration = 5 * 60 * 1000000; + long wait_duration = timer_queues_.wait_duration_usec(max_wait_duration); + event_.wait_for_usec(lock, wait_duration); + event_.clear(lock); + op_queue<operation> ops; + timer_queues_.get_ready_timers(ops); + if (!ops.empty()) + { + lock.unlock(); + io_context_.post_deferred_completions(ops); + lock.lock(); + } + } +} + +void winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler) +{ + scheduler->run_thread(); +} + +void winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.insert(&queue); +} + +void winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue) +{ + mutex::scoped_lock lock(mutex_); + timer_queues_.erase(&queue); +} + +} // namespace detail +} // namespace asio + +#include "asio/detail/pop_options.hpp" + +#endif // defined(ASIO_WINDOWS_RUNTIME) + +#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP diff --git a/lib/asio/detail/impl/winsock_init.ipp b/lib/asio/detail/impl/winsock_init.ipp new file mode 100644 index 0000000..da4b0c0 --- /dev/null +++ b/lib/asio/detail/impl/winsock_init.ipp @@ -0,0 +1,82 @@ +// +// detail/impl/winsock_init.ipp +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
+#define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include "asio/detail/config.hpp"
+
+#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+
+#include "asio/detail/socket_types.hpp"
+#include "asio/detail/winsock_init.hpp"
+#include "asio/detail/throw_error.hpp"
+#include "asio/error.hpp"
+
+#include "asio/detail/push_options.hpp"
+
+namespace asio {
+namespace detail {
+
+void winsock_init_base::startup(data& d,
+    unsigned char major, unsigned char minor)
+{
+  if (::InterlockedIncrement(&d.init_count_) == 1)
+  {
+    WSADATA wsa_data;
+    long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data);
+    ::InterlockedExchange(&d.result_, result);
+  }
+}
+
+void winsock_init_base::manual_startup(data& d)
+{
+  if (::InterlockedIncrement(&d.init_count_) == 1)
+  {
+    ::InterlockedExchange(&d.result_, 0);
+  }
+}
+
+void winsock_init_base::cleanup(data& d)
+{
+  if (::InterlockedDecrement(&d.init_count_) == 0)
+  {
+    ::WSACleanup();
+  }
+}
+
+void winsock_init_base::manual_cleanup(data& d)
+{
+  ::InterlockedDecrement(&d.init_count_);
+}
+
+void winsock_init_base::throw_on_error(data& d)
+{
+  long result = ::InterlockedExchangeAdd(&d.result_, 0);
+  if (result != 0)
+  {
+    asio::error_code ec(result,
+        asio::error::get_system_category());
+    asio::detail::throw_error(ec, "winsock");
+  }
+}
+
+} // namespace detail
+} // namespace asio
+
+#include "asio/detail/pop_options.hpp"
+
+#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
+
+#endif // ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
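The winsock_init machinery above keeps WSAStartup and WSACleanup balanced across any number of users with nothing more than an interlocked reference count, and stores the startup result for later inspection by throw_on_error. A minimal self-contained sketch of the same pattern, assuming a Windows toolchain linked against ws2_32; the names (winsock_ref, init_count, init_result) are illustrative and not part of asio:

// winsock_refcount.cpp - illustrative only.
// Build with: cl winsock_refcount.cpp ws2_32.lib
#include <winsock2.h>

namespace {

volatile LONG init_count = 0;  // number of live initialiser objects
volatile LONG init_result = 0; // result of the one real WSAStartup call

struct winsock_ref
{
  winsock_ref()
  {
    // Only the first initialiser performs the real startup.
    if (::InterlockedIncrement(&init_count) == 1)
    {
      WSADATA wsa_data;
      ::InterlockedExchange(&init_result,
          ::WSAStartup(MAKEWORD(2, 2), &wsa_data));
    }
  }

  ~winsock_ref()
  {
    // Only the last initialiser tears Winsock down again.
    if (::InterlockedDecrement(&init_count) == 0)
      ::WSACleanup();
  }
};

} // namespace

int main()
{
  winsock_ref guard; // Winsock is usable for the lifetime of this object.

  // An interlocked add of zero is an atomic read of the stored result,
  // mirroring winsock_init_base::throw_on_error above.
  return static_cast<int>(::InterlockedExchangeAdd(&init_result, 0));
}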