removed old libuv

This commit is contained in:
TJIC 2014-08-22 17:35:35 -04:00
parent a1bc6dcbf8
commit 12175c11d2
214 changed files with 0 additions and 61774 deletions

@ -1,38 +0,0 @@
*.swp
*.[oa]
*.l[oa]
*.opensdf
*.orig
*.pyc
*.sdf
*.suo
core
vgcore.*
.buildstamp
/libuv.so
/libuv.dylib
# Generated by dtrace(1) when doing an in-tree build.
/src/unix/uv-dtrace.h
/out/
/build/gyp
/run-tests
/run-tests.exe
/run-tests.dSYM
/run-benchmarks
/run-benchmarks.exe
/run-benchmarks.dSYM
*.sln
*.vcproj
*.vcxproj
*.vcxproj.filters
*.vcxproj.user
_UpgradeReport_Files/
UpgradeLog*.XML
Debug
Release
ipch

@ -1,18 +0,0 @@
Alan Gutierrez <alan@prettyrobots.com> <alan@blogometer.com>
Bert Belder <bertbelder@gmail.com> <info@2bs.nl>
Bert Belder <bertbelder@gmail.com> <user@ChrUbuntu.(none)>
Brandon Philips <brandon.philips@rackspace.com> <brandon@ifup.org>
Brian White <mscdex@mscdex.net>
Brian White <mscdex@mscdex.net> <mscdex@gmail.com>
Frank Denis <github@pureftpd.org>
Isaac Z. Schlueter <i@izs.me>
Marc Schlaich <marc.schlaich@googlemail.com> <marc.schlaich@gmail.com>
Robert Mustacchi <rm@joyent.com> <rm@fingolfin.org>
Ryan Dahl <ryan@joyent.com> <ry@tinyclouds.org>
Ryan Emery <seebees@gmail.com>
San-Tai Hsu <vanilla@fatpipi.com>
Saúl Ibarra Corretgé <saghul@gmail.com>
Shigeki Ohtsu <ohtsu@iij.ad.jp> <ohtsu@ohtsu.org>
Timothy J. Fontaine <tjfontaine@gmail.com>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Yuki Okumura <mjt@cltn.org>

@ -1,96 +0,0 @@
# Authors ordered by first contribution.
Ryan Dahl <ryan@joyent.com>
Bert Belder <bertbelder@gmail.com>
Josh Roesslein <jroesslein@gmail.com>
Alan Gutierrez <alan@prettyrobots.com>
Joshua Peek <josh@joshpeek.com>
Igor Zinkovsky <igorzi@microsoft.com>
San-Tai Hsu <vanilla@fatpipi.com>
Ben Noordhuis <info@bnoordhuis.nl>
Henry Rawas <henryr@schakra.com>
Robert Mustacchi <rm@joyent.com>
Matt Stevens <matt@alloysoft.com>
Paul Querna <pquerna@apache.org>
Shigeki Ohtsu <ohtsu@iij.ad.jp>
Tom Hughes <tom.hughes@palm.com>
Peter Bright <drpizza@quiscalusmexicanus.org>
Jeroen Janssen <jeroen.janssen@gmail.com>
Andrea Lattuada <ndr.lattuada@gmail.com>
Augusto Henrique Hentz <ahhentz@gmail.com>
Clifford Heath <clifford.heath@gmail.com>
Jorge Chamorro Bieling <jorge@jorgechamorro.com>
Luis Lavena <luislavena@gmail.com>
Matthew Sporleder <msporleder@gmail.com>
Erick Tryzelaar <erick.tryzelaar@gmail.com>
Isaac Z. Schlueter <i@izs.me>
Pieter Noordhuis <pcnoordhuis@gmail.com>
Marek Jelen <marek@jelen.biz>
Fedor Indutny <fedor.indutny@gmail.com>
Saúl Ibarra Corretgé <saghul@gmail.com>
Felix Geisendörfer <felix@debuggable.com>
Yuki Okumura <mjt@cltn.org>
Roman Shtylman <shtylman@gmail.com>
Frank Denis <github@pureftpd.org>
Carter Allen <CarterA@opt-6.com>
Tj Holowaychuk <tj@vision-media.ca>
Shimon Doodkin <helpmepro1@gmail.com>
Ryan Emery <seebees@gmail.com>
Bruce Mitchener <bruce.mitchener@gmail.com>
Maciej Małecki <maciej.malecki@notimplemented.org>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Daisuke Murase <typester@cpan.org>
Paddy Byers <paddy.byers@gmail.com>
Dan VerWeire <dverweire@gmail.com>
Brandon Benvie <brandon@bbenvie.com>
Brandon Philips <brandon.philips@rackspace.com>
Nathan Rajlich <nathan@tootallnate.net>
Charlie McConnell <charlie@charlieistheman.com>
Vladimir Dronnikov <dronnikov@gmail.com>
Aaron Bieber <qbit@deftly.net>
Bulat Shakirzyanov <mallluhuct@gmail.com>
Brian White <mscdex@mscdex.net>
Erik Dubbelboer <erik@dubbelboer.com>
Keno Fischer <kenof@stanford.edu>
Ira Cooper <Ira.Cooper@mathworks.com>
Andrius Bentkus <andrius.bentkus@gmail.com>
Iñaki Baz Castillo <ibc@aliax.net>
Mark Cavage <mark.cavage@joyent.com>
George Yohng <georgegh@oss3d.com>
Xidorn Quan <quanxunzhen@gmail.com>
Roman Neuhauser <rneuhauser@suse.cz>
Shuhei Tanuma <shuhei.tanuma@gmail.com>
Bryan Cantrill <bcantrill@acm.org>
Trond Norbye <trond.norbye@gmail.com>
Tim Holy <holy@wustl.edu>
Prancesco Pertugio <meh@schizofreni.co>
Leonard Hecker <leonard.hecker91@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Luigi Grilli <luigi.grilli@gmail.com>
Shannen Saez <shannenlaptop@gmail.com>
Artur Adib <arturadib@gmail.com>
Hiroaki Nakamura <hnakamur@gmail.com>
Ting-Yu Lin <ph.minamo@cytisan.com>
Stephen Gallagher <sgallagh@redhat.com>
Shane Holloway <shane.holloway@ieee.org>
Andrew Shaffer <darawk@gmail.com>
Vlad Tudose <vlad.tudose@intel.com>
Ben Leslie <benno@benno.id.au>
Tim Bradshaw <tfb@cley.com>
Timothy J. Fontaine <tjfontaine@gmail.com>
Marc Schlaich <marc.schlaich@googlemail.com>
Brian Mazza <louseman@gmail.com>
Nils Maier <maierman@web.de>
Nicholas Vavilov <vvnicholas@gmail.com>
Miroslav Bajtoš <miro.bajtos@gmail.com>
Elliot Saba <staticfloat@gmail.com>
Wynn Wilkes <wynnw@movenetworks.com>
Andrei Sedoi <bsnote@gmail.com>
Chris Bank <cbank@adobe.com>
Geert Jansen <geertj@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
huxingyi <huxingyi@msn.com>
Alex Crichton <alex@alexcrichton.com>
Luca Bruno <lucab@debian.org>
Trevor Norris <trev.norris@gmail.com>
Oguz Bastemur <obastemur@gmail.com>
Alexis Campailla <alexis@janeasystems.com>

@ -1,362 +0,0 @@
2014.02.19, Version 0.10.25 (Stable)
Changes since version 0.10.24:
* stream: start thread after assignments (Oguz Bastemur)
* unix: correct error when calling uv_shutdown twice (Saúl Ibarra Corretgé)
* windows: freeze in uv_tcp_endgame (Alexis Campailla)
* sunos: handle rearm errors (Fedor Indutny)
2014.01.30, Version 0.10.24 (Stable), aecd296b6bce9b40f06a61c5c94e43d45ac7308a
Changes since version 0.10.23:
* linux: move sscanf() out of the assert() (Trevor Norris)
* linux: fix C99/C++ comment (Fedor Indutny)
2014.01.23, Version 0.10.23 (Stable), dbd218e699fec8be311d85e4788be9e28ae884f8
Changes since version 0.10.22:
* linux: relax assumption on /proc/stat parsing (Luca Bruno)
* openbsd: fix obvious bug in uv_cpu_info (Fedor Indutny)
* process: close stdio after dup2'ing it (Fedor Indutny)
2014.01.08, Version 0.10.22 (Stable), f526c90eeff271d9323a9107b9a64a4671fd3103
Changes since version 0.10.21:
* windows: avoid assertion failure when pipe server is closed (Bert Belder)
2013.12.19, Version 0.10.21 (Stable), 375ebce068555f0ca8151b562edb5f1b263022db
Changes since version 0.10.20:
* unix: fix a possible memory leak in uv_fs_readdir (Alex Crichton)
2013.12.13, Version 0.10.20 (Stable), 04141464dd0fba90ace9aa6f7003ce139b888a40
Changes since version 0.10.19:
* linux: fix up SO_REUSEPORT back-port (Ben Noordhuis)
* fs-event: fix invalid memory access (huxingyi)
2013.11.13, Version 0.10.19 (Stable), 33959f7524090b8d2c6c41e2400ca77e31755059
Changes since version 0.10.18:
* darwin: avoid calling GetCurrentProcess (Fedor Indutny)
* unix: update events from pevents between polls (Fedor Indutny)
* fsevents: support Japanese characters in path (Chris Bank)
* linux: don't turn on SO_REUSEPORT socket option (Ben Noordhuis)
* build: fix windows smp build with gyp (Geert Jansen)
* linux: handle EPOLLHUP without EPOLLIN/EPOLLOUT (Ben Noordhuis)
* unix: fix reopened fd bug (Fedor Indutny)
* core: fix fake watcher list and count preservation (Fedor Indutny)
2013.10.19, Version 0.10.18 (Stable), 9ec52963b585e822e87bdc5de28d6143aff0d2e5
Changes since version 0.10.17:
* unix: fix uv_spawn() NULL pointer deref on ENOMEM (Ben Noordhuis)
* unix: don't close inherited fds on uv_spawn() fail (Ben Noordhuis)
* unix: revert recent FSEvent changes (Ben Noordhuis)
* unix: fix non-synchronized access in signal.c (Ben Noordhuis)
2013.09.25, Version 0.10.17 (Stable), 9670e0a93540c2f0d86c84a375f2303383c11e7e
Changes since version 0.10.16:
* build: remove GCC_WARN_ABOUT_MISSING_NEWLINE (Ben Noordhuis)
* darwin: fix 10.6 build error in fsevents.c (Ben Noordhuis)
2013.09.06, Version 0.10.16 (Stable), 2bce230d81f4853a23662cbeb26fe98010b1084b
Changes since version 0.10.15:
* windows: make uv_shutdown() for write-only pipes work (Bert Belder)
* windows: make uv_fs_open() report EINVAL when invalid arguments are passed
(Bert Belder)
* windows: make uv_fs_open() report _open_osfhandle() failure correctly (Bert
Belder)
* windows: make uv_fs_chmod() report errors correctly (Bert Belder)
* windows: wrap multi-statement macros in do..while block (Bert Belder)
2013.08.24, Version 0.10.15 (Stable), 221078a8fdd9b853c6b557b3d9a5dd744b4fdd6b
Changes since version 0.10.14:
* fsevents: create FSEvents thread on demand (Ben Noordhuis)
* fsevents: use a single thread for interacting with FSEvents, because it's not
thread-safe. (Fedor Indutny)
* fsevents: share FSEventStream between multiple FS watchers, which removes a
limit on the maximum number of file watchers that can be created on OS X.
(Fedor Indutny)
2013.08.22, Version 0.10.14 (Stable), 15d64132151c18b26346afa892444b95e2addad0
Changes since version 0.10.13:
* unix: retry waitpid() on EINTR (Ben Noordhuis)
2013.07.26, Version 0.10.13 (Stable), 381312e1fe6fecbabc943ccd56f0e7d114b3d064
Changes since version 0.10.12:
* unix, windows: fix uv_fs_chown() function prototype (Ben Noordhuis)
2013.07.10, Version 0.10.12 (Stable), 58a46221bba726746887a661a9f36fe9ff204209
Changes since version 0.10.11:
* linux: add support for MIPS (Andrei Sedoi)
* windows: uv_spawn shouldn't reject reparse points (Bert Belder)
* windows: use WSAGetLastError(), not errno (Ben Noordhuis)
* build: darwin: disable -fstrict-aliasing warnings (Ben Noordhuis)
* build: `all` now builds static and dynamic lib (Ben Noordhuis)
* unix: fix build when !defined(PTHREAD_MUTEX_ERRORCHECK) (Ben Noordhuis)
2013.06.13, Version 0.10.11 (Stable), c3b75406a66a10222a589cb173e8f469e9665c7e
Changes since version 0.10.10:
* unix: unconditionally stop handle on close (Ben Noordhuis)
* freebsd: don't enable dtrace if it's not available (Brian White)
* build: make HAVE_DTRACE=0 should disable dtrace (Timothy J. Fontaine)
* unix: remove overzealous assert (Ben Noordhuis)
* unix: clear UV_STREAM_SHUTTING after shutdown() (Ben Noordhuis)
* unix: fix busy loop, write if POLLERR or POLLHUP (Ben Noordhuis)
2013.06.05, Version 0.10.10 (Stable), 0d95a88bd35fce93863c57a460be613aea34d2c5
Changes since version 0.10.9:
* include: document uv_update_time() and uv_now() (Ben Noordhuis)
* linux: fix cpu model parsing on newer arm kernels (Ben Noordhuis)
* linux: fix memory leak in uv_cpu_info() error path (Ben Noordhuis)
* linux: don't ignore OOM errors in uv_cpu_info() (Ben Noordhuis)
* unix, windows: move uv_now() to uv-common.c (Ben Noordhuis)
* darwin: make uv_fs_sendfile() respect length param (Wynn Wilkes)
2013.05.29, Version 0.10.9 (Stable), a195f9ace23d92345baf57582678bfc3017e6632
Changes since version 0.10.8:
* unix: fix stream refcounting buglet (Ben Noordhuis)
* unix: remove erroneous asserts (Ben Noordhuis)
* unix: add uv__is_closing() macro (Ben Noordhuis)
* unix: stop stream POLLOUT watcher on write error (Ben Noordhuis)
2013.05.25, Version 0.10.8 (Stable), 0f39be12926fe2d8766a9f025797a473003e6504
Changes since version 0.10.7:
* windows: make uv_spawn not fail under job control (Bert Belder)
* darwin: assume CFRunLoopStop() isn't thread-safe (Fedor Indutny)
* win: fix UV_EALREADY incorrectly set (Bert Belder)
* darwin: make two uv__cf_*() functions static (Ben Noordhuis)
* darwin: task_info() cannot fail (Ben Noordhuis)
* unix: add mapping for ENETDOWN (Ben Noordhuis)
* unix: implicitly signal write errors to libuv user (Ben Noordhuis)
* unix: fix assert on signal pipe overflow (Bert Belder)
* unix: turn off POLLOUT after stream connect (Ben Noordhuis)
2013.05.15, Version 0.10.7 (Stable), 028baaf0846b686a81e992cb2f2f5a9b8e841fcf
Changes since version 0.10.6:
* windows: kill child processes when the parent dies (Bert Belder)
2013.05.15, Version 0.10.6 (Stable), 11e6613e6260d95c8cf11bf89a2759c24649319a
Changes since version 0.10.5:
* stream: fix osx select hack (Fedor Indutny)
* stream: fix small nit in select hack, add test (Fedor Indutny)
* build: link with libkvm on openbsd (Ben Noordhuis)
* stream: use harder sync restrictions for osx-hack (Fedor Indutny)
* unix: fix EMFILE error handling (Ben Noordhuis)
* darwin: fix unnecessary include headers (Daisuke Murase)
* darwin: rename darwin-getproctitle.m (Ben Noordhuis)
* build: convert predefined $PLATFORM to lower case (Elliot Saba)
* build: set soname in shared library (Ben Noordhuis)
* build: make `make test` link against .a again (Ben Noordhuis)
* darwin: fix ios build, don't require ApplicationServices (Ben Noordhuis)
* build: only set soname on shared object builds (Timothy J. Fontaine)
2013.04.24, Version 0.10.5 (Stable), 6595a7732c52eb4f8e57c88655f72997a8567a67
Changes since version 0.10.4:
* unix: silence STATIC_ASSERT compiler warnings (Ben Noordhuis)
* windows: make timers handle large timeouts (Miroslav Bajtoš)
* windows: remove superfluous assert statement (Bert Belder)
* unix: silence STATIC_ASSERT compiler warnings (Ben Noordhuis)
* linux: don't use fopen() in uv_resident_set_memory() (Ben Noordhuis)
2013.04.12, Version 0.10.4 (Stable), 85827e26403ac6dfa331af8ec9916ea7e27bd833
Changes since version 0.10.3:
* include: update uv_backend_fd() documentation (Ben Noordhuis)
* unix: include uv.h in src/version.c (Ben Noordhuis)
* unix: don't write more than IOV_MAX iovecs (Fedor Indutny)
* mingw-w64: don't call _set_invalid_parameter_handler (Nils Maier)
* build: gyp disable thin archives (Timothy J. Fontaine)
* sunos: re-export entire library when static (Timothy J. Fontaine)
* unix: dtrace probes for tick-start and tick-stop (Timothy J. Fontaine)
* windows: fix memory leak in fs__sendfile (Shannen Saez)
* windows: remove double initialization in uv_tty_init (Shannen Saez)
* build: fix dtrace-enabled out of tree build (Ben Noordhuis)
* build: squelch -Wdollar-in-identifier-extension warnings (Ben Noordhuis)
* inet: snprintf returns int, not size_t (Brian White)
* win: refactor uv_cpu_info (Bert Belder)
* build: add support for Visual Studio 2012 (Nicholas Vavilov)
* build: -Wno-dollar-in-identifier-extension is clang only (Ben Noordhuis)
2013.03.28, Version 0.10.3 (Stable), 31ebe23973dd98fd8a24c042b606f37a794e99d0
Changes since version 0.10.2:
* include: remove extraneous const from uv_version() (Ben Noordhuis)
* doc: update README, replace `OS` by `PLATFORM` (Ben Noordhuis)
* build: simplify .buildstamp rule (Ben Noordhuis)
* build: disable -Wstrict-aliasing on darwin (Ben Noordhuis)
* darwin: don't select(&exceptfds) in fallback path (Ben Noordhuis)
* unix: don't clear flags after closing UDP handle (Saúl Ibarra Corretgé)
2013.03.25, Version 0.10.2 (Stable), 0f36a00568f3e7608f97f6c6cdb081f4800a50c9
This is the first officially versioned release of libuv. Starting now
libuv will make releases independently of Node.js.
Changes since Node.js v0.10.0:
* test: add tap output for windows (Timothy J. Fontaine)
* unix: fix uv_tcp_simultaneous_accepts() logic (Ben Noordhuis)
* include: bump UV_VERSION_MINOR (Ben Noordhuis)
* unix: improve uv_guess_handle() implementation (Ben Noordhuis)
* stream: run try_select only for pipes and ttys (Fedor Indutny)
Changes since Node.js v0.10.1:
* build: rename OS to PLATFORM (Ben Noordhuis)
* unix: make uv_timer_init() initialize repeat (Brian Mazza)
* unix: make timers handle large timeouts (Ben Noordhuis)
* build: add OBJC makefile var (Ben Noordhuis)
* Add `uv_version()` and `uv_version_string()` APIs (Bert Belder)

@ -1,41 +0,0 @@
libuv is part of the Node project: http://nodejs.org/
libuv may be distributed alone under Node's license:
====
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to all parts of libuv that are not externally
maintained libraries.
The externally maintained libraries used by libuv are:
- tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
- ngx_queue.h (from Nginx), copyright Igor Sysoev. Two clause BSD license.
- inet_pton and inet_ntop implementations, contained in src/inet.c, are
copyright the Internet Systems Consortium, Inc., and licensed under the ISC
license.
- stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
clause BSD license.

@ -1,53 +0,0 @@
# Copyright Joyent, Inc. and other Node contributors. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
SRCDIR ?= $(CURDIR)
ifeq (,$(builddir_name))
VPATH := $(SRCDIR)
include $(SRCDIR)/build.mk
else # Out of tree build.
# Drop all built-in rules.
.SUFFIXES:
.PHONY: $(builddir_name)
$(builddir_name): $(builddir_name)/.buildstamp
$(MAKE) -C $@ -f $(CURDIR)/Makefile $(MAKECMDGOALS) \
SRCDIR=$(CURDIR) builddir_name=
$(builddir_name)/.buildstamp:
mkdir -p $(dir $@)
touch $@
# Add no-op rules for Makefiles to stop make from trying to rebuild them.
Makefile:: ;
%.mk:: ;
# Turn everything else into a no-op rule that depends on the build directory.
%:: $(builddir_name) ;
.PHONY: clean distclean
clean distclean:
$(RM) -fr $(builddir_name)
endif

@ -1,118 +0,0 @@
# libuv
libuv is a new platform layer for Node. Its purpose is to abstract IOCP on
Windows and epoll/kqueue/event ports/etc. on Unix systems. We intend to
eventually contain all platform differences in this library.
http://nodejs.org/
## Features
* Non-blocking TCP sockets
* Non-blocking named pipes
* UDP
* Timers
* Child process spawning
* Asynchronous DNS via `uv_getaddrinfo`.
* Asynchronous file system APIs `uv_fs_*`
* High resolution time `uv_hrtime`
* Current executable path look up `uv_exepath`
* Thread pool scheduling `uv_queue_work`
* ANSI escape code controlled TTY `uv_tty_t`
* File system events. Currently supports inotify, `ReadDirectoryChangesW`
and kqueue; event ports are planned for the near future.
`uv_fs_event_t`
* IPC and socket sharing between processes `uv_write2`
## Community
* [Mailing list](http://groups.google.com/group/libuv)
## Documentation
* [include/uv.h](https://github.com/joyent/libuv/blob/master/include/uv.h)
&mdash; API documentation in the form of detailed header comments.
* [An Introduction to libuv](http://nikhilm.github.com/uvbook/) &mdash; An
overview of libuv with tutorials.
* [LXJS 2012 talk](http://www.youtube.com/watch?v=nGn60vDSxQ4) - High-level
introductory talk about libuv.
* [Tests and benchmarks](https://github.com/joyent/libuv/tree/master/test) -
API specification and usage examples.
## Build Instructions
For GCC (including MinGW) there are two ways to build: via normal
makefiles or via GYP. GYP is a meta-build system which can generate MSVS,
Makefile, and XCode backends. It is best used for integration into other
projects. The old system uses plain GNU Makefiles.
To build via Makefile simply execute:
make
MinGW users should run this instead:
make PLATFORM=mingw
Out-of-tree builds are supported:
make builddir_name=/path/to/builddir
To build with Visual Studio, run the vcbuild.bat file, which will
check out the GYP code into build/gyp and generate the uv.sln and
related files.
Windows users can also build from the command line using msbuild. This is
done by running vcbuild.bat from a Visual Studio command prompt.
To have GYP generate a build script for another system, make sure that
you have Python 2.6 or 2.7 installed, then check out GYP into the
project tree manually:
mkdir -p build
svn co http://gyp.googlecode.com/svn/trunk build/gyp
Or:
mkdir -p build
git clone https://git.chromium.org/external/gyp.git build/gyp
Unix users run
./gyp_uv.py -f make
make -C out
Macintosh users run
./gyp_uv.py -f xcode
xcodebuild -project uv.xcodeproj -configuration Release -target All
Note for UNIX users: compile your project with `-D_LARGEFILE_SOURCE` and
`-D_FILE_OFFSET_BITS=64`. GYP builds take care of that automatically.
Note for Linux users: compile your project with `-D_GNU_SOURCE` when you
include `uv.h`. GYP builds take care of that automatically. If you use
autotools, add an `AC_GNU_SOURCE` declaration to your `configure.ac`.
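As a rough illustration of the APIs listed under Features, here is a minimal
sketch of a program built against this tree. It is not part of the original
README; it assumes the 0.10-era interface (watcher callbacks take a trailing
status argument), and the compile/link line is only a placeholder for however
you actually link libuv:

/* hello-uv.c -- sketch; compile along the lines of:
 *   cc -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 hello-uv.c -Iinclude -L. -luv
 */
#include <stdio.h>
#include <uv.h>

static void on_timer(uv_timer_t* handle, int status) {
  /* 0.10-era callbacks receive a trailing status argument. */
  (void) status;
  printf("tick at %llu ns, libuv %s\n",
         (unsigned long long) uv_hrtime(), uv_version_string());
  uv_timer_stop(handle);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, on_timer, 100, 0);  /* fire once after 100 ms */
  return uv_run(loop, UV_RUN_DEFAULT);       /* returns once no handles remain */
}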
## Supported Platforms
Microsoft Windows operating systems since Windows XP SP2. It can be built
with either Visual Studio or MinGW.
Linux 2.6 using the GCC toolchain.
MacOS using the GCC or XCode toolchain.
Solaris 121 and later using GCC toolchain.

@ -1,171 +0,0 @@
# Copyright Joyent, Inc. and other Node contributors. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
ifdef PLATFORM
override PLATFORM := $(shell echo $(PLATFORM) | tr "[A-Z]" "[a-z]")
else
PLATFORM = $(shell sh -c 'uname -s | tr "[A-Z]" "[a-z]"')
endif
CPPFLAGS += -I$(SRCDIR)/include -I$(SRCDIR)/include/uv-private
ifeq (darwin,$(PLATFORM))
SOEXT = dylib
else
SOEXT = so
endif
ifneq (,$(findstring mingw,$(PLATFORM)))
include $(SRCDIR)/config-mingw.mk
else
include $(SRCDIR)/config-unix.mk
endif
BENCHMARKS= \
test/benchmark-async-pummel.o \
test/benchmark-async.o \
test/benchmark-fs-stat.o \
test/benchmark-getaddrinfo.o \
test/benchmark-loop-count.o \
test/benchmark-million-async.o \
test/benchmark-million-timers.o \
test/benchmark-multi-accept.o \
test/benchmark-ping-pongs.o \
test/benchmark-pound.o \
test/benchmark-pump.o \
test/benchmark-sizes.o \
test/benchmark-spawn.o \
test/benchmark-tcp-write-batch.o \
test/benchmark-thread.o \
test/benchmark-udp-pummel.o \
test/blackhole-server.o \
test/dns-server.o \
test/echo-server.o \
TESTS= \
test/blackhole-server.o \
test/dns-server.o \
test/echo-server.o \
test/test-active.o \
test/test-async.o \
test/test-barrier.o \
test/test-callback-order.o \
test/test-callback-stack.o \
test/test-condvar.o \
test/test-connection-fail.o \
test/test-cwd-and-chdir.o \
test/test-delayed-accept.o \
test/test-dlerror.o \
test/test-embed.o \
test/test-emfile.o \
test/test-error.o \
test/test-fail-always.o \
test/test-fs.o \
test/test-fs-event.o \
test/test-fs-poll.o \
test/test-getaddrinfo.o \
test/test-get-currentexe.o \
test/test-get-loadavg.o \
test/test-get-memory.o \
test/test-getsockname.o \
test/test-hrtime.o \
test/test-idle.o \
test/test-ipc.o \
test/test-ipc-send-recv.o \
test/test-loop-handles.o \
test/test-loop-stop.o \
test/test-multiple-listen.o \
test/test-mutexes.o \
test/test-osx-select.o \
test/test-pass-always.o \
test/test-ping-pong.o \
test/test-pipe-bind-error.o \
test/test-pipe-connect-error.o \
test/test-pipe-server-close.o \
test/test-platform-output.o \
test/test-poll.o \
test/test-poll-close.o \
test/test-process-title.o \
test/test-ref.o \
test/test-run-nowait.o \
test/test-run-once.o \
test/test-semaphore.o \
test/test-shutdown-close.o \
test/test-shutdown-eof.o \
test/test-shutdown-twice.o \
test/test-signal.o \
test/test-signal-multiple-loops.o \
test/test-spawn.o \
test/test-stdio-over-pipes.o \
test/test-tcp-bind6-error.o \
test/test-tcp-bind-error.o \
test/test-tcp-close.o \
test/test-tcp-close-accept.o \
test/test-tcp-close-while-connecting.o \
test/test-tcp-connect6-error.o \
test/test-tcp-connect-error-after-write.o \
test/test-tcp-connect-error.o \
test/test-tcp-connect-timeout.o \
test/test-tcp-flags.o \
test/test-tcp-open.o \
test/test-tcp-read-stop.o \
test/test-tcp-shutdown-after-write.o \
test/test-tcp-unexpected-read.o \
test/test-tcp-writealot.o \
test/test-tcp-write-to-half-open-connection.o \
test/test-thread.o \
test/test-threadpool.o \
test/test-threadpool-cancel.o \
test/test-timer-again.o \
test/test-timer-from-check.o \
test/test-timer.o \
test/test-tty.o \
test/test-udp-dgram-too-big.o \
test/test-udp-ipv6.o \
test/test-udp-multicast-join.o \
test/test-udp-multicast-ttl.o \
test/test-udp-open.o \
test/test-udp-options.o \
test/test-udp-send-and-recv.o \
test/test-util.o \
test/test-walk-handles.o \
test/test-watcher-cross-stop.o \
.PHONY: all bench clean clean-platform distclean test
run-tests$(E): test/run-tests.o test/runner.o $(RUNNER_SRC) $(TESTS) libuv.a
$(CC) $(CPPFLAGS) $(RUNNER_CFLAGS) -o $@ $^ $(RUNNER_LIBS) $(RUNNER_LDFLAGS)
run-benchmarks$(E): test/run-benchmarks.o test/runner.o $(RUNNER_SRC) $(BENCHMARKS) libuv.a
$(CC) $(CPPFLAGS) $(RUNNER_CFLAGS) -o $@ $^ $(RUNNER_LIBS) $(RUNNER_LDFLAGS)
test/echo.o: test/echo.c test/echo.h
test: run-tests$(E)
$(CURDIR)/$<
bench: run-benchmarks$(E)
$(CURDIR)/$<
clean distclean: clean-platform
$(RM) libuv.a libuv.$(SOEXT) \
test/run-tests.o test/run-benchmarks.o \
test/run-tests$(E) test/run-benchmarks$(E) \
$(BENCHMARKS) $(TESTS) $(RUNNER_LIBS)

@ -1,233 +0,0 @@
#!/bin/sh
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
SPARSE=${SPARSE:-sparse}
SPARSE_FLAGS=${SPARSE_FLAGS:-"
-D__POSIX__
-Wsparse-all
-Wno-do-while
-Wno-transparent-union
-Iinclude
-Iinclude/uv-private
-Isrc
"}
SOURCES="
include/uv-private/ngx-queue.h
include/uv-private/tree.h
include/uv-private/uv-unix.h
include/uv.h
src/fs-poll.c
src/inet.c
src/unix/async.c
src/unix/core.c
src/unix/dl.c
src/unix/error.c
src/unix/fs.c
src/unix/getaddrinfo.c
src/unix/internal.h
src/unix/loop-watcher.c
src/unix/loop.c
src/unix/pipe.c
src/unix/poll.c
src/unix/process.c
src/unix/signal.c
src/unix/stream.c
src/unix/tcp.c
src/unix/thread.c
src/unix/threadpool.c
src/unix/timer.c
src/unix/tty.c
src/unix/udp.c
src/uv-common.c
src/uv-common.h
"
TESTS="
test/benchmark-async-pummel.c
test/benchmark-async.c
test/benchmark-fs-stat.c
test/benchmark-getaddrinfo.c
test/benchmark-loop-count.c
test/benchmark-million-async.c
test/benchmark-million-timers.c
test/benchmark-multi-accept.c
test/benchmark-ping-pongs.c
test/benchmark-pound.c
test/benchmark-pump.c
test/benchmark-sizes.c
test/benchmark-spawn.c
test/benchmark-tcp-write-batch.c
test/benchmark-thread.c
test/benchmark-udp-pummel.c
test/blackhole-server.c
test/dns-server.c
test/echo-server.c
test/run-benchmarks.c
test/run-tests.c
test/runner-unix.c
test/runner-unix.h
test/runner.c
test/runner.h
test/task.h
test/test-active.c
test/test-async.c
test/test-barrier.c
test/test-callback-order.c
test/test-callback-stack.c
test/test-condvar.c
test/test-connection-fail.c
test/test-cwd-and-chdir.c
test/test-delayed-accept.c
test/test-dlerror.c
test/test-embed.c
test/test-error.c
test/test-fail-always.c
test/test-fs-event.c
test/test-fs-poll.c
test/test-fs.c
test/test-get-currentexe.c
test/test-get-loadavg.c
test/test-get-memory.c
test/test-getaddrinfo.c
test/test-getsockname.c
test/test-hrtime.c
test/test-idle.c
test/test-ipc-send-recv.c
test/test-ipc.c
test/test-loop-handles.c
test/test-multiple-listen.c
test/test-mutexes.c
test/test-pass-always.c
test/test-ping-pong.c
test/test-pipe-bind-error.c
test/test-pipe-connect-error.c
test/test-pipe-server-close.c
test/test-platform-output.c
test/test-poll-close.c
test/test-poll.c
test/test-process-title.c
test/test-ref.c
test/test-run-nowait.c
test/test-run-once.c
test/test-semaphore.c
test/test-shutdown-close.c
test/test-shutdown-eof.c
test/test-signal-multiple-loops.c
test/test-signal.c
test/test-spawn.c
test/test-stdio-over-pipes.c
test/test-tcp-bind-error.c
test/test-tcp-bind6-error.c
test/test-tcp-close-while-connecting.c
test/test-tcp-close-accept.c
test/test-tcp-close.c
test/test-tcp-connect-error-after-write.c
test/test-tcp-connect-error.c
test/test-tcp-connect-timeout.c
test/test-tcp-connect6-error.c
test/test-tcp-flags.c
test/test-tcp-open.c
test/test-tcp-read-stop.c
test/test-tcp-shutdown-after-write.c
test/test-tcp-unexpected-read.c
test/test-tcp-write-error.c
test/test-tcp-write-to-half-open-connection.c
test/test-tcp-writealot.c
test/test-thread.c
test/test-threadpool-cancel.c
test/test-threadpool.c
test/test-timer-again.c
test/test-timer.c
test/test-tty.c
test/test-udp-dgram-too-big.c
test/test-udp-ipv6.c
test/test-udp-multicast-join.c
test/test-udp-multicast-ttl.c
test/test-udp-open.c
test/test-udp-options.c
test/test-udp-send-and-recv.c
test/test-util.c
test/test-walk-handles.c
test/test-watcher-cross-stop.c
"
case `uname -s` in
AIX)
SPARSE_FLAGS="$SPARSE_FLAGS -D_AIX=1"
SOURCES="$SOURCES
src/unix/aix.c"
;;
Darwin)
SPARSE_FLAGS="$SPARSE_FLAGS -D__APPLE__=1"
SOURCES="$SOURCES
include/uv-private/uv-bsd.h
src/unix/darwin.c
src/unix/kqueue.c
src/unix/fsevents.c"
;;
DragonFly)
SPARSE_FLAGS="$SPARSE_FLAGS -D__DragonFly__=1"
SOURCES="$SOURCES
include/uv-private/uv-bsd.h
src/unix/kqueue.c
src/unix/freebsd.c"
;;
FreeBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__FreeBSD__=1"
SOURCES="$SOURCES
include/uv-private/uv-bsd.h
src/unix/kqueue.c
src/unix/freebsd.c"
;;
Linux)
SPARSE_FLAGS="$SPARSE_FLAGS -D__linux__=1"
SOURCES="$SOURCES
include/uv-private/uv-linux.h
src/unix/linux-inotify.c
src/unix/linux-core.c
src/unix/linux-syscalls.c
src/unix/linux-syscalls.h"
;;
NetBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__NetBSD__=1"
SOURCES="$SOURCES
include/uv-private/uv-bsd.h
src/unix/kqueue.c
src/unix/netbsd.c"
;;
OpenBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__OpenBSD__=1"
SOURCES="$SOURCES
include/uv-private/uv-bsd.h
src/unix/kqueue.c
src/unix/openbsd.c"
;;
SunOS)
SPARSE_FLAGS="$SPARSE_FLAGS -D__sun=1"
SOURCES="$SOURCES
include/uv-private/uv-sunos.h
src/unix/sunos.c"
;;
esac
for ARCH in __i386__ __x86_64__ __arm__ __mips__; do
$SPARSE $SPARSE_FLAGS -D$ARCH=1 $SOURCES
done
# Tests are architecture independent.
$SPARSE $SPARSE_FLAGS -Itest $TESTS

@ -1,207 +0,0 @@
{
'variables': {
'visibility%': 'hidden', # V8's visibility setting
'target_arch%': 'ia32', # set v8's target architecture
'host_arch%': 'ia32', # set v8's host architecture
'library%': 'static_library', # allow override to 'shared_library' for DLL/.so builds
'component%': 'static_library', # NB. these names match with what V8 expects
'msvs_multi_core_compile': '0', # we do enable multicore compiles, but not using the V8 way
'gcc_version%': 'unknown',
'clang%': 0,
},
'target_defaults': {
'default_configuration': 'Debug',
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'cflags': [ '-g', '-O0', '-fwrapv' ],
'msvs_settings': {
'VCCLCompilerTool': {
'target_conditions': [
['library=="static_library"', {
'RuntimeLibrary': 1, # static debug
}, {
'RuntimeLibrary': 3, # DLL debug
}],
],
'Optimization': 0, # /Od, no optimization
'MinimalRebuild': 'false',
'OmitFramePointers': 'false',
'BasicRuntimeChecks': 3, # /RTC1
},
'VCLinkerTool': {
'LinkIncremental': 2, # enable incremental linking
},
},
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '0',
'OTHER_CFLAGS': [ '-Wno-strict-aliasing' ],
},
'conditions': [
['OS != "win"', {
'defines': [ 'EV_VERIFY=2' ],
}],
]
},
'Release': {
'defines': [ 'NDEBUG' ],
'cflags': [
'-O3',
'-fstrict-aliasing',
'-fomit-frame-pointer',
'-fdata-sections',
'-ffunction-sections',
],
'msvs_settings': {
'VCCLCompilerTool': {
'target_conditions': [
['library=="static_library"', {
'RuntimeLibrary': 0, # static release
}, {
'RuntimeLibrary': 2, # DLL release
}],
],
'Optimization': 3, # /Ox, full optimization
'FavorSizeOrSpeed': 1, # /Ot, favour speed over size
'InlineFunctionExpansion': 2, # /Ob2, inline anything eligible
'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG
'OmitFramePointers': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
},
'VCLibrarianTool': {
'AdditionalOptions': [
'/LTCG', # link time code generation
],
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': 1, # link-time code generation
'OptimizeReferences': 2, # /OPT:REF
'EnableCOMDATFolding': 2, # /OPT:ICF
'LinkIncremental': 1, # disable incremental linking
},
},
}
},
'msvs_settings': {
'VCCLCompilerTool': {
'StringPooling': 'true', # pool string literals
'DebugInformationFormat': 3, # Generate a PDB
'WarningLevel': 3,
'BufferSecurityCheck': 'true',
'ExceptionHandling': 1, # /EHsc
'SuppressStartupBanner': 'true',
'WarnAsError': 'false',
'AdditionalOptions': [
'/MP', # compile across multiple CPUs
],
},
'VCLibrarianTool': {
},
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'RandomizedBaseAddress': 2, # enable ASLR
'DataExecutionPrevention': 2, # enable DEP
'AllowIsolation': 'true',
'SuppressStartupBanner': 'true',
'target_conditions': [
['_type=="executable"', {
'SubSystem': 1, # console executable
}],
],
},
},
'conditions': [
['OS == "win"', {
'msvs_cygwin_shell': 0, # prevent actions from trying to use cygwin
'defines': [
'WIN32',
# we don't really want VC++ warning us about
# how dangerous C functions are...
'_CRT_SECURE_NO_DEPRECATE',
# ... or that C implementations shouldn't use
# POSIX names
'_CRT_NONSTDC_NO_DEPRECATE',
],
'target_conditions': [
['target_arch=="x64"', {
'msvs_configuration_platform': 'x64'
}]
]
}],
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'cflags': [ '-Wall' ],
'cflags_cc': [ '-fno-rtti', '-fno-exceptions' ],
'target_conditions': [
['_type=="static_library"', {
'standalone_static_library': 1, # disable thin archive which needs binutils >= 2.19
}],
],
'conditions': [
[ 'host_arch != target_arch and target_arch=="ia32"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
[ 'OS=="linux"', {
'cflags': [ '-ansi' ],
}],
[ 'OS=="solaris"', {
'cflags': [ '-pthreads' ],
'ldflags': [ '-pthreads' ],
}, {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
}],
[ 'visibility=="hidden" and (clang==1 or gcc_version >= 40)', {
'cflags': [ '-fvisibility=hidden' ],
}],
],
}],
['OS=="mac"', {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks
'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic
# (Equivalent to -fPIC)
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO', # -fno-exceptions
'GCC_ENABLE_CPP_RTTI': 'NO', # -fno-rtti
'GCC_ENABLE_PASCAL_STRINGS': 'NO', # No -mpascal-strings
# GCC_INLINES_ARE_PRIVATE_EXTERN maps to -fvisibility-inlines-hidden
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'PREBINDING': 'NO', # No -Wl,-prebind
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fstrict-aliasing',
],
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
],
},
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {'ARCHS': ['i386']},
}],
['target_arch=="x64"', {
'xcode_settings': {'ARCHS': ['x86_64']},
}],
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
}],
],
}],
['OS=="solaris"', {
'cflags': [ '-fno-omit-frame-pointer' ],
# pull in V8's postmortem metadata
'ldflags': [ '-Wl,-z,allextract' ]
}],
],
},
}

@ -1,48 +0,0 @@
# Copyright Joyent, Inc. and other Node contributors. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Use make -f Makefile.gcc PREFIX=i686-w64-mingw32-
# for cross compilation
CC = $(PREFIX)gcc
AR = $(PREFIX)ar
E=.exe
CFLAGS=$(CPPFLAGS) -g --std=gnu89 -D_WIN32_WINNT=0x0600
LDFLAGS=-lm
WIN_SRCS=$(wildcard $(SRCDIR)/src/win/*.c)
WIN_OBJS=$(WIN_SRCS:.c=.o)
RUNNER_CFLAGS=$(CFLAGS) -D_GNU_SOURCE # Need _GNU_SOURCE for strdup?
RUNNER_LDFLAGS=$(LDFLAGS)
RUNNER_LIBS=-lws2_32 -lpsapi -liphlpapi
RUNNER_SRC=test/runner-win.c
libuv.a: $(WIN_OBJS) src/fs-poll.o src/inet.o src/uv-common.o src/version.o
$(AR) rcs $@ $^
src/%.o: src/%.c include/uv.h include/uv-private/uv-win.h
$(CC) $(CFLAGS) -c $< -o $@
src/win/%.o: src/win/%.c include/uv.h include/uv-private/uv-win.h src/win/internal.h
$(CC) $(CFLAGS) -o $@ -c $<
clean-platform:
-rm -f src/win/*.o

@ -1,207 +0,0 @@
# Copyright Joyent, Inc. and other Node contributors. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
E=
CSTDFLAG=--std=c89 -pedantic -Wall -Wextra -Wno-unused-parameter
CFLAGS += -g
CPPFLAGS += -I$(SRCDIR)/src
LDFLAGS=-lm
CPPFLAGS += -D_LARGEFILE_SOURCE
CPPFLAGS += -D_FILE_OFFSET_BITS=64
RUNNER_SRC=test/runner-unix.c
RUNNER_CFLAGS=$(CFLAGS) -I$(SRCDIR)/test
RUNNER_LDFLAGS=
DTRACE_OBJS=
DTRACE_HEADER=
OBJS += src/unix/async.o
OBJS += src/unix/core.o
OBJS += src/unix/dl.o
OBJS += src/unix/error.o
OBJS += src/unix/fs.o
OBJS += src/unix/getaddrinfo.o
OBJS += src/unix/loop.o
OBJS += src/unix/loop-watcher.o
OBJS += src/unix/pipe.o
OBJS += src/unix/poll.o
OBJS += src/unix/process.o
OBJS += src/unix/signal.o
OBJS += src/unix/stream.o
OBJS += src/unix/tcp.o
OBJS += src/unix/thread.o
OBJS += src/unix/threadpool.o
OBJS += src/unix/timer.o
OBJS += src/unix/tty.o
OBJS += src/unix/udp.o
OBJS += src/fs-poll.o
OBJS += src/uv-common.o
OBJS += src/inet.o
OBJS += src/version.o
ifeq (sunos,$(PLATFORM))
HAVE_DTRACE ?= 1
CPPFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500
LDFLAGS+=-lkstat -lnsl -lsendfile -lsocket
# Library dependencies are not transitive.
OBJS += src/unix/sunos.o
ifeq (1, $(HAVE_DTRACE))
OBJS += src/unix/dtrace.o
DTRACE_OBJS += src/unix/core.o
endif
endif
ifeq (aix,$(PLATFORM))
CPPFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500
LDFLAGS+= -lperfstat
OBJS += src/unix/aix.o
endif
ifeq (darwin,$(PLATFORM))
HAVE_DTRACE ?= 1
# dtrace(1) probes contain dollar signs on OS X. Mute the warnings they
# generate but only when CC=clang, -Wno-dollar-in-identifier-extension
# is a clang extension.
ifeq (__clang__,$(shell sh -c "$(CC) -dM -E - </dev/null | grep -ow __clang__"))
CFLAGS += -Wno-dollar-in-identifier-extension
endif
CPPFLAGS += -D_DARWIN_USE_64_BIT_INODE=1
LDFLAGS += -framework Foundation \
-framework CoreServices \
-framework ApplicationServices
SOEXT = dylib
OBJS += src/unix/darwin.o
OBJS += src/unix/kqueue.o
OBJS += src/unix/fsevents.o
OBJS += src/unix/proctitle.o
OBJS += src/unix/darwin-proctitle.o
endif
ifeq (linux,$(PLATFORM))
CSTDFLAG += -D_GNU_SOURCE
LDFLAGS+=-ldl -lrt
RUNNER_CFLAGS += -D_GNU_SOURCE
OBJS += src/unix/linux-core.o \
src/unix/linux-inotify.o \
src/unix/linux-syscalls.o \
src/unix/proctitle.o
endif
ifeq (freebsd,$(PLATFORM))
ifeq ($(shell dtrace -l 1>&2 2>/dev/null; echo $$?),0)
HAVE_DTRACE ?= 1
endif
LDFLAGS+=-lkvm
OBJS += src/unix/freebsd.o
OBJS += src/unix/kqueue.o
endif
ifeq (dragonfly,$(PLATFORM))
LDFLAGS+=-lkvm
OBJS += src/unix/freebsd.o
OBJS += src/unix/kqueue.o
endif
ifeq (netbsd,$(PLATFORM))
LDFLAGS+=-lkvm
OBJS += src/unix/netbsd.o
OBJS += src/unix/kqueue.o
endif
ifeq (openbsd,$(PLATFORM))
LDFLAGS+=-lkvm
OBJS += src/unix/openbsd.o
OBJS += src/unix/kqueue.o
endif
ifneq (,$(findstring cygwin,$(PLATFORM)))
# We drop the --std=c89, it hides CLOCK_MONOTONIC on cygwin
CSTDFLAG = -D_GNU_SOURCE
LDFLAGS+=
OBJS += src/unix/cygwin.o
endif
ifeq (sunos,$(PLATFORM))
RUNNER_LDFLAGS += -pthreads
else
RUNNER_LDFLAGS += -pthread
endif
ifeq ($(HAVE_DTRACE), 1)
DTRACE_HEADER = src/unix/uv-dtrace.h
CPPFLAGS += -Isrc/unix
CFLAGS += -DHAVE_DTRACE
endif
ifneq (darwin,$(PLATFORM))
# Must correspond with UV_VERSION_MAJOR and UV_VERSION_MINOR in src/version.c
SO_LDFLAGS = -Wl,-soname,libuv.so.0.10
endif
RUNNER_LDFLAGS += $(LDFLAGS)
all:
# Force a sequential build of the static and the shared library.
# Works around a make quirk where it forgets to (re)build either
# the *.o or *.pic.o files, depending on what target comes first.
$(MAKE) -f $(SRCDIR)/Makefile libuv.a
$(MAKE) -f $(SRCDIR)/Makefile libuv.$(SOEXT)
libuv.a: $(OBJS)
$(AR) rcs $@ $^
libuv.$(SOEXT): override CFLAGS += -fPIC
libuv.$(SOEXT): $(OBJS:%.o=%.pic.o)
$(CC) -shared -o $@ $^ $(LDFLAGS) $(SO_LDFLAGS)
include/uv-private/uv-unix.h: \
include/uv-private/uv-bsd.h \
include/uv-private/uv-darwin.h \
include/uv-private/uv-linux.h \
include/uv-private/uv-sunos.h
src/unix/internal.h: src/unix/linux-syscalls.h
src/.buildstamp src/unix/.buildstamp test/.buildstamp:
mkdir -p $(@D)
touch $@
src/unix/%.o src/unix/%.pic.o: src/unix/%.c include/uv.h include/uv-private/uv-unix.h src/unix/internal.h src/unix/.buildstamp $(DTRACE_HEADER)
$(CC) $(CSTDFLAG) $(CPPFLAGS) $(CFLAGS) -c $< -o $@
src/%.o src/%.pic.o: src/%.c include/uv.h include/uv-private/uv-unix.h src/.buildstamp
$(CC) $(CSTDFLAG) $(CPPFLAGS) $(CFLAGS) -c $< -o $@
test/%.o: test/%.c include/uv.h test/.buildstamp
$(CC) $(CSTDFLAG) $(CPPFLAGS) $(CFLAGS) -c $< -o $@
clean-platform:
$(RM) test/run-{tests,benchmarks}.dSYM $(OBJS) $(OBJS:%.o=%.pic.o) src/unix/uv-dtrace.h
src/unix/uv-dtrace.h: src/unix/uv-dtrace.d
dtrace -h -xnolibs -s $< -o $@
src/unix/dtrace.o: src/unix/uv-dtrace.d $(DTRACE_OBJS)
dtrace -G -s $^ -o $@
src/unix/dtrace.pic.o: src/unix/uv-dtrace.d $(DTRACE_OBJS:%.o=%.pic.o)
dtrace -G -s $^ -o $@

@ -1,99 +0,0 @@
#!/usr/bin/env python
import glob
import platform
import os
import subprocess
import sys
CC = os.environ.get('CC', 'cc')
script_dir = os.path.dirname(__file__)
uv_root = os.path.normpath(script_dir)
output_dir = os.path.join(os.path.abspath(uv_root), 'out')
sys.path.insert(0, os.path.join(uv_root, 'build', 'gyp', 'pylib'))
try:
import gyp
except ImportError:
print('You need to install gyp in build/gyp first. See the README.')
sys.exit(42)
def host_arch():
machine = platform.machine()
if machine == 'i386': return 'ia32'
if machine == 'x86_64': return 'x64'
if machine.startswith('arm'): return 'arm'
if machine.startswith('mips'): return 'mips'
return machine # Return as-is and hope for the best.
def compiler_version():
proc = subprocess.Popen(CC.split() + ['--version'], stdout=subprocess.PIPE)
is_clang = 'clang' in proc.communicate()[0].split('\n')[0]
proc = subprocess.Popen(CC.split() + ['-dumpversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split('.')
version = map(int, version[:2])
version = tuple(version)
return (version, is_clang)
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
print 'Error running GYP'
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
# On msvs it will crash if it gets an absolute path.
# On Mac/make it will crash if it doesn't get an absolute path.
if sys.platform == 'win32':
args.append(os.path.join(uv_root, 'uv.gyp'))
common_fn = os.path.join(uv_root, 'common.gypi')
options_fn = os.path.join(uv_root, 'options.gypi')
# we force vs 2010 over 2008 which would otherwise be the default for gyp
if not os.environ.get('GYP_MSVS_VERSION'):
os.environ['GYP_MSVS_VERSION'] = '2010'
else:
args.append(os.path.join(os.path.abspath(uv_root), 'uv.gyp'))
common_fn = os.path.join(os.path.abspath(uv_root), 'common.gypi')
options_fn = os.path.join(os.path.abspath(uv_root), 'options.gypi')
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
if os.path.exists(options_fn):
args.extend(['-I', options_fn])
args.append('--depth=' + uv_root)
# There's a bug with windows which doesn't allow this feature.
if sys.platform != 'win32':
if '-f' not in args:
args.extend('-f make'.split())
if 'eclipse' not in args and 'ninja' not in args:
args.extend(['-Goutput_dir=' + output_dir])
args.extend(['--generator-output', output_dir])
(major, minor), is_clang = compiler_version()
args.append('-Dgcc_version=%d' % (10 * major + minor))
args.append('-Dclang=%d' % int(is_clang))
if not any(a.startswith('-Dhost_arch=') for a in args):
args.append('-Dhost_arch=%s' % host_arch())
if not any(a.startswith('-Dtarget_arch=') for a in args):
args.append('-Dtarget_arch=%s' % host_arch())
if not any(a.startswith('-Dlibrary=') for a in args):
args.append('-Dlibrary=static_library')
if not any(a.startswith('-Dcomponent=') for a in args):
args.append('-Dcomponent=static_library')
gyp_args = list(args)
print gyp_args
run_gyp(gyp_args)

@ -1,129 +0,0 @@
/*
* Copyright (C) Igor Sysoev
*/
#ifndef NGX_QUEUE_H_INCLUDED_
#define NGX_QUEUE_H_INCLUDED_
typedef struct ngx_queue_s ngx_queue_t;
struct ngx_queue_s {
ngx_queue_t *prev;
ngx_queue_t *next;
};
#define ngx_queue_init(q) \
do { \
(q)->prev = q; \
(q)->next = q; \
} \
while (0)
#define ngx_queue_empty(h) \
(h == (h)->prev)
#define ngx_queue_insert_head(h, x) \
do { \
(x)->next = (h)->next; \
(x)->next->prev = x; \
(x)->prev = h; \
(h)->next = x; \
} \
while (0)
#define ngx_queue_insert_after ngx_queue_insert_head
#define ngx_queue_insert_tail(h, x) \
do { \
(x)->prev = (h)->prev; \
(x)->prev->next = x; \
(x)->next = h; \
(h)->prev = x; \
} \
while (0)
#define ngx_queue_head(h) \
(h)->next
#define ngx_queue_last(h) \
(h)->prev
#define ngx_queue_sentinel(h) \
(h)
#define ngx_queue_next(q) \
(q)->next
#define ngx_queue_prev(q) \
(q)->prev
#if defined(NGX_DEBUG)
#define ngx_queue_remove(x) \
do { \
(x)->next->prev = (x)->prev; \
(x)->prev->next = (x)->next; \
(x)->prev = NULL; \
(x)->next = NULL; \
} \
while (0)
#else
#define ngx_queue_remove(x) \
do { \
(x)->next->prev = (x)->prev; \
(x)->prev->next = (x)->next; \
} \
while (0)
#endif
#define ngx_queue_split(h, q, n) \
do { \
(n)->prev = (h)->prev; \
(n)->prev->next = n; \
(n)->next = q; \
(h)->prev = (q)->prev; \
(h)->prev->next = h; \
(q)->prev = n; \
} \
while (0)
#define ngx_queue_add(h, n) \
do { \
(h)->prev->next = (n)->next; \
(n)->next->prev = (h)->prev; \
(h)->prev = (n)->prev; \
(h)->prev->next = h; \
} \
while (0)
#define ngx_queue_data(q, type, link) \
((type *) ((unsigned char *) q - offsetof(type, link)))
#define ngx_queue_foreach(q, h) \
for ((q) = ngx_queue_head(h); \
(q) != ngx_queue_sentinel(h) && !ngx_queue_empty(h); \
(q) = ngx_queue_next(q))
#endif /* NGX_QUEUE_H_INCLUDED_ */
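The queue above is intrusive: the caller embeds an ngx_queue_t inside its own
struct and the macros operate on that embedded link, with ngx_queue_data()
recovering the enclosing struct. A small usage sketch follows (an editorial
illustration, not part of the original tree; the item_t type and its queue
field are invented for the example, and <stddef.h> is needed because
ngx_queue_data() uses offsetof):

#include <stddef.h>            /* offsetof, used by ngx_queue_data() */
#include <stdio.h>
#include "ngx-queue.h"         /* the header shown above */

typedef struct {
  int value;
  ngx_queue_t queue;           /* embedded link; no separate allocation */
} item_t;

int main(void) {
  ngx_queue_t head;
  ngx_queue_t* q;
  item_t a = { 1, { NULL, NULL } };
  item_t b = { 2, { NULL, NULL } };

  ngx_queue_init(&head);                    /* empty queue points at itself */
  ngx_queue_insert_tail(&head, &a.queue);
  ngx_queue_insert_tail(&head, &b.queue);

  ngx_queue_foreach(q, &head) {
    /* recover the enclosing struct from the embedded link */
    item_t* it = ngx_queue_data(q, item_t, queue);
    printf("%d\n", it->value);              /* prints 1 then 2 */
  }
  return 0;
}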

@ -1,247 +0,0 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or the compiler gives many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 don't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
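// Illustrative note: under this MSVC-specific scheme UINT64_C(1) expands to
// 1ui64 (an MSVC integer-literal suffix), whereas a conforming C99 <stdint.h>
// would use a standard suffix such as ULL.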
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]


@@ -1,768 +0,0 @@
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UV_TREE_H_
#define UV_TREE_H_
#ifndef UV__UNUSED
# if __GNUC__
# define UV__UNUSED __attribute__((unused))
# else
# define UV__UNUSED
# endif
#endif
/*
* This file defines data structures for different types of trees:
* splay trees and red-black trees.
*
* A splay tree is a self-organizing data structure. Every operation
* on the tree causes a splay to happen. The splay moves the requested
* node to the root of the tree and partly rebalances it.
*
* This has the benefit that request locality causes faster lookups as
* the requested nodes move to the top of the tree. On the other hand,
* every lookup causes memory writes.
*
* The Balance Theorem bounds the total access time for m operations
* and n inserts on an initially empty tree as O((m + n)lg n). The
* amortized cost for a sequence of m accesses to a splay tree is O(lg n).
*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*
* Illustrative usage sketches for both tree variants appear further below,
* after the SPLAY_FOREACH and RB_FOREACH_REVERSE_SAFE macro groups.
*/
#define SPLAY_HEAD(name, type) \
struct name { \
struct type *sph_root; /* root of the tree */ \
}
#define SPLAY_INITIALIZER(root) \
{ NULL }
#define SPLAY_INIT(root) do { \
(root)->sph_root = NULL; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ENTRY(type) \
struct { \
struct type *spe_left; /* left element */ \
struct type *spe_right; /* right element */ \
}
#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
#define SPLAY_ROOT(head) (head)->sph_root
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKLEFT(head, tmp, field) do { \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKRIGHT(head, tmp, field) do { \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
void name##_SPLAY(struct name *, struct type *); \
void name##_SPLAY_MINMAX(struct name *, int); \
struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
\
/* Finds the node with the same key as elm */ \
static __inline struct type * \
name##_SPLAY_FIND(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) \
return(NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) \
return (head->sph_root); \
return (NULL); \
} \
\
static __inline struct type * \
name##_SPLAY_NEXT(struct name *head, struct type *elm) \
{ \
name##_SPLAY(head, elm); \
if (SPLAY_RIGHT(elm, field) != NULL) { \
elm = SPLAY_RIGHT(elm, field); \
while (SPLAY_LEFT(elm, field) != NULL) { \
elm = SPLAY_LEFT(elm, field); \
} \
} else \
elm = NULL; \
return (elm); \
} \
\
static __inline struct type * \
name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
return (SPLAY_ROOT(head)); \
}
/* Main splay operation.
* Moves the node closest to the key of elm to the top of the tree.
*/
#define SPLAY_GENERATE(name, type, field, cmp) \
struct type * \
name##_SPLAY_INSERT(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) { \
SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
} else { \
int __comp; \
name##_SPLAY(head, elm); \
__comp = (cmp)(elm, (head)->sph_root); \
if(__comp < 0) { \
SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
SPLAY_RIGHT(elm, field) = (head)->sph_root; \
SPLAY_LEFT((head)->sph_root, field) = NULL; \
} else if (__comp > 0) { \
SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
SPLAY_LEFT(elm, field) = (head)->sph_root; \
SPLAY_RIGHT((head)->sph_root, field) = NULL; \
} else \
return ((head)->sph_root); \
} \
(head)->sph_root = (elm); \
return (NULL); \
} \
\
struct type * \
name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *__tmp; \
if (SPLAY_EMPTY(head)) \
return (NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) { \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} else { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
name##_SPLAY(head, elm); \
SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
} \
return (elm); \
} \
return (NULL); \
} \
\
void \
name##_SPLAY(struct name *head, struct type *elm) \
{ \
struct type __node, *__left, *__right, *__tmp; \
int __comp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
__left = __right = &__node; \
\
while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) > 0){ \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
} \
\
/* Splay with either the minimum or the maximum element \
* Used to find minimum or maximum element in tree. \
*/ \
void name##_SPLAY_MINMAX(struct name *head, int __comp) \
{ \
struct type __node, *__left, *__right, *__tmp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
__left = __right = &__node; \
\
while (1) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp > 0) { \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
}
#define SPLAY_NEGINF -1
#define SPLAY_INF 1
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_INF))
#define SPLAY_FOREACH(x, name, head) \
for ((x) = SPLAY_MIN(name, head); \
(x) != NULL; \
(x) = SPLAY_NEXT(name, head, x))
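/* Illustrative usage sketch (not part of the original header).  The struct,
 * field, comparator, and function names below are hypothetical; the pattern
 * mirrors how the splay macros above are normally consumed.
 */
#if 0
struct example_node {
  SPLAY_ENTRY(example_node) link;   /* embedded left/right pointers */
  int key;
};

static int example_cmp(struct example_node* a, struct example_node* b) {
  return a->key < b->key ? -1 : a->key > b->key;
}

SPLAY_HEAD(example_tree, example_node);
SPLAY_PROTOTYPE(example_tree, example_node, link, example_cmp)
SPLAY_GENERATE(example_tree, example_node, link, example_cmp)

static void example(void) {
  struct example_tree tree = SPLAY_INITIALIZER(&tree);
  struct example_node node;
  struct example_node* it;

  node.key = 42;
  SPLAY_INSERT(example_tree, &tree, &node);     /* the new node ends up at the root */
  it = SPLAY_FIND(example_tree, &tree, &node);  /* it == &node */
  SPLAY_FOREACH(it, example_tree, &tree) {
    /* visits nodes in ascending key order */
  }
}
#endif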
/* Macros that define a red-black tree */
#define RB_HEAD(name, type) \
struct name { \
struct type *rbh_root; /* root of the tree */ \
}
#define RB_INITIALIZER(root) \
{ NULL }
#define RB_INIT(root) do { \
(root)->rbh_root = NULL; \
} while (/*CONSTCOND*/ 0)
#define RB_BLACK 0
#define RB_RED 1
#define RB_ENTRY(type) \
struct { \
struct type *rbe_left; /* left element */ \
struct type *rbe_right; /* right element */ \
struct type *rbe_parent; /* parent element */ \
int rbe_color; /* node color */ \
}
#define RB_LEFT(elm, field) (elm)->field.rbe_left
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
#define RB_COLOR(elm, field) (elm)->field.rbe_color
#define RB_ROOT(head) (head)->rbh_root
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
#define RB_SET(elm, parent, field) do { \
RB_PARENT(elm, field) = parent; \
RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
RB_COLOR(elm, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
#define RB_SET_BLACKRED(black, red, field) do { \
RB_COLOR(black, field) = RB_BLACK; \
RB_COLOR(red, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
#ifndef RB_AUGMENT
#define RB_AUGMENT(x) do {} while (0)
#endif
#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
(tmp) = RB_RIGHT(elm, field); \
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_LEFT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
(tmp) = RB_LEFT(elm, field); \
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_RIGHT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
attr struct type *name##_RB_INSERT(struct name *, struct type *); \
attr struct type *name##_RB_FIND(struct name *, struct type *); \
attr struct type *name##_RB_NFIND(struct name *, struct type *); \
attr struct type *name##_RB_NEXT(struct type *); \
attr struct type *name##_RB_PREV(struct type *); \
attr struct type *name##_RB_MINMAX(struct name *, int); \
\

/* Main rb operation.
* Moves the node closest to the key of elm to the top of the tree.
*/
#define RB_GENERATE(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp,)
#define RB_GENERATE_STATIC(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
attr void \
name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
struct type *parent, *gparent, *tmp; \
while ((parent = RB_PARENT(elm, field)) != NULL && \
RB_COLOR(parent, field) == RB_RED) { \
gparent = RB_PARENT(parent, field); \
if (parent == RB_LEFT(gparent, field)) { \
tmp = RB_RIGHT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (RB_RIGHT(parent, field) == elm) { \
RB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_RIGHT(head, gparent, tmp, field); \
} else { \
tmp = RB_LEFT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (RB_LEFT(parent, field) == elm) { \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_LEFT(head, gparent, tmp, field); \
} \
} \
RB_COLOR(head->rbh_root, field) = RB_BLACK; \
} \
\
attr void \
name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, \
struct type *elm) \
{ \
struct type *tmp; \
while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
elm != RB_ROOT(head)) { \
if (RB_LEFT(parent, field) == elm) { \
tmp = RB_RIGHT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = RB_RIGHT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
struct type *oleft; \
if ((oleft = RB_LEFT(tmp, field)) \
!= NULL) \
RB_COLOR(oleft, field) = RB_BLACK; \
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_RIGHT(head, tmp, oleft, field); \
tmp = RB_RIGHT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_RIGHT(tmp, field)) \
RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
RB_ROTATE_LEFT(head, parent, tmp, field); \
elm = RB_ROOT(head); \
break; \
} \
} else { \
tmp = RB_LEFT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = RB_LEFT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
struct type *oright; \
if ((oright = RB_RIGHT(tmp, field)) \
!= NULL) \
RB_COLOR(oright, field) = RB_BLACK; \
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_LEFT(head, tmp, oright, field); \
tmp = RB_LEFT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_LEFT(tmp, field)) \
RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
elm = RB_ROOT(head); \
break; \
} \
} \
} \
if (elm) \
RB_COLOR(elm, field) = RB_BLACK; \
} \
\
attr struct type * \
name##_RB_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *child, *parent, *old = elm; \
int color; \
if (RB_LEFT(elm, field) == NULL) \
child = RB_RIGHT(elm, field); \
else if (RB_RIGHT(elm, field) == NULL) \
child = RB_LEFT(elm, field); \
else { \
struct type *left; \
elm = RB_RIGHT(elm, field); \
while ((left = RB_LEFT(elm, field)) != NULL) \
elm = left; \
child = RB_RIGHT(elm, field); \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
if (RB_PARENT(elm, field) == old) \
parent = elm; \
(elm)->field = (old)->field; \
if (RB_PARENT(old, field)) { \
if (RB_LEFT(RB_PARENT(old, field), field) == old) \
RB_LEFT(RB_PARENT(old, field), field) = elm; \
else \
RB_RIGHT(RB_PARENT(old, field), field) = elm; \
RB_AUGMENT(RB_PARENT(old, field)); \
} else \
RB_ROOT(head) = elm; \
RB_PARENT(RB_LEFT(old, field), field) = elm; \
if (RB_RIGHT(old, field)) \
RB_PARENT(RB_RIGHT(old, field), field) = elm; \
if (parent) { \
left = parent; \
do { \
RB_AUGMENT(left); \
} while ((left = RB_PARENT(left, field)) != NULL); \
} \
goto color; \
} \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
color: \
if (color == RB_BLACK) \
name##_RB_REMOVE_COLOR(head, parent, child); \
return (old); \
} \
\
/* Inserts a node into the RB tree */ \
attr struct type * \
name##_RB_INSERT(struct name *head, struct type *elm) \
{ \
struct type *tmp; \
struct type *parent = NULL; \
int comp = 0; \
tmp = RB_ROOT(head); \
while (tmp) { \
parent = tmp; \
comp = (cmp)(elm, parent); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
RB_SET(elm, parent, field); \
if (parent != NULL) { \
if (comp < 0) \
RB_LEFT(parent, field) = elm; \
else \
RB_RIGHT(parent, field) = elm; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = elm; \
name##_RB_INSERT_COLOR(head, elm); \
return (NULL); \
} \
\
/* Finds the node with the same key as elm */ \
attr struct type * \
name##_RB_FIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (NULL); \
} \
\
/* Finds the first node greater than or equal to the search key */ \
attr struct type * \
name##_RB_NFIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *res = NULL; \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) { \
res = tmp; \
tmp = RB_LEFT(tmp, field); \
} \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (res); \
} \
\
/* ARGSUSED */ \
attr struct type * \
name##_RB_NEXT(struct type *elm) \
{ \
if (RB_RIGHT(elm, field)) { \
elm = RB_RIGHT(elm, field); \
while (RB_LEFT(elm, field)) \
elm = RB_LEFT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
/* ARGSUSED */ \
attr struct type * \
name##_RB_PREV(struct type *elm) \
{ \
if (RB_LEFT(elm, field)) { \
elm = RB_LEFT(elm, field); \
while (RB_RIGHT(elm, field)) \
elm = RB_RIGHT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
attr struct type * \
name##_RB_MINMAX(struct name *head, int val) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *parent = NULL; \
while (tmp) { \
parent = tmp; \
if (val < 0) \
tmp = RB_LEFT(tmp, field); \
else \
tmp = RB_RIGHT(tmp, field); \
} \
return (parent); \
}
#define RB_NEGINF -1
#define RB_INF 1
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
#define RB_PREV(name, x, y) name##_RB_PREV(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
#define RB_FOREACH(x, name, head) \
for ((x) = RB_MIN(name, head); \
(x) != NULL; \
(x) = name##_RB_NEXT(x))
#define RB_FOREACH_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_SAFE(x, name, head, y) \
for ((x) = RB_MIN(name, head); \
((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_REVERSE(x, name, head) \
for ((x) = RB_MAX(name, head); \
(x) != NULL; \
(x) = name##_RB_PREV(x))
#define RB_FOREACH_REVERSE_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
for ((x) = RB_MAX(name, head); \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
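/* Illustrative usage sketch (not part of the original header).  The struct,
 * field, comparator, and function names are hypothetical; libuv consumes
 * these macros the same way for its timer and signal handles.
 */
#if 0
struct example_node {
  RB_ENTRY(example_node) link;   /* embedded left/right/parent/color */
  int key;
};

static int example_cmp(struct example_node* a, struct example_node* b) {
  return a->key < b->key ? -1 : a->key > b->key;
}

RB_HEAD(example_tree, example_node);
RB_GENERATE_STATIC(example_tree, example_node, link, example_cmp)

static void example(void) {
  struct example_tree tree = RB_INITIALIZER(&tree);
  struct example_node node;
  struct example_node* it;

  node.key = 7;
  RB_INSERT(example_tree, &tree, &node);     /* NULL return: key was not present */
  it = RB_FIND(example_tree, &tree, &node);  /* it == &node */
  RB_FOREACH(it, example_tree, &tree) {
    /* visits nodes in ascending key order */
  }
}
#endif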
#endif /* UV_TREE_H_ */


@@ -1,34 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_BSD_H
#define UV_BSD_H
#define UV_PLATFORM_FS_EVENT_FIELDS \
uv__io_t event_watcher; \

#define UV_IO_PRIVATE_PLATFORM_FIELDS \
int rcount; \
int wcount; \

#define UV_HAVE_KQUEUE 1
#endif /* UV_BSD_H */


@@ -1,61 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_DARWIN_H
#define UV_DARWIN_H
#if defined(__APPLE__) && defined(__MACH__)
# include <mach/mach.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <TargetConditionals.h>
# define UV_PLATFORM_SEM_T semaphore_t
#endif
#define UV_IO_PRIVATE_PLATFORM_FIELDS \
int rcount; \
int wcount; \

#define UV_PLATFORM_LOOP_FIELDS \
uv_thread_t cf_thread; \
void* cf_cb; \
void* cf_loop; \
uv_mutex_t cf_mutex; \
uv_sem_t cf_sem; \
ngx_queue_t cf_signals; \

#define UV_PLATFORM_FS_EVENT_FIELDS \
uv__io_t event_watcher; \
char* realpath; \
int realpath_len; \
int cf_flags; \
void* cf_eventstream; \
uv_async_t* cf_cb; \
ngx_queue_t cf_events; \
uv_sem_t cf_sem; \
uv_mutex_t cf_mutex; \

#define UV_STREAM_PRIVATE_PLATFORM_FIELDS \
void* select; \

#define UV_HAVE_KQUEUE 1
#endif /* UV_DARWIN_H */


@@ -1,34 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_LINUX_H
#define UV_LINUX_H
#define UV_PLATFORM_LOOP_FIELDS \
uv__io_t inotify_read_watcher; \
void* inotify_watchers; \
int inotify_fd; \

#define UV_PLATFORM_FS_EVENT_FIELDS \
ngx_queue_t watchers; \
int wd; \

#endif /* UV_LINUX_H */


@@ -1,44 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_SUNOS_H
#define UV_SUNOS_H
#include <sys/port.h>
#include <port.h>
/* For the sake of convenience and reduced #ifdef-ery in src/unix/sunos.c,
* add the fs_event fields even when this version of SunOS doesn't support
* file watching.
*/
#define UV_PLATFORM_LOOP_FIELDS \
uv__io_t fs_event_watcher; \
int fs_fd; \

#if defined(PORT_SOURCE_FILE)
# define UV_PLATFORM_FS_EVENT_FIELDS \
file_obj_t fo; \
int fd; \

#endif /* defined(PORT_SOURCE_FILE) */
#endif /* UV_SUNOS_H */


@@ -1,332 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_UNIX_H
#define UV_UNIX_H
#include "ngx-queue.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <termios.h>
#include <pwd.h>
#include <semaphore.h>
#include <pthread.h>
#include <signal.h>
#if defined(__linux__)
# include "uv-linux.h"
#elif defined(__sun)
# include "uv-sunos.h"
#elif defined(__APPLE__)
# include "uv-darwin.h"
#elif defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# include "uv-bsd.h"
#endif
#ifndef UV_IO_PRIVATE_PLATFORM_FIELDS
# define UV_IO_PRIVATE_PLATFORM_FIELDS /* empty */
#endif
#define UV_IO_PRIVATE_FIELDS \
UV_IO_PRIVATE_PLATFORM_FIELDS \

struct uv__io_s;
struct uv__async;
struct uv_loop_s;
typedef void (*uv__io_cb)(struct uv_loop_s* loop,
struct uv__io_s* w,
unsigned int events);
typedef struct uv__io_s uv__io_t;
struct uv__io_s {
uv__io_cb cb;
ngx_queue_t pending_queue;
ngx_queue_t watcher_queue;
unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
unsigned int events; /* Current event mask. */
int fd;
UV_IO_PRIVATE_FIELDS
};
typedef void (*uv__async_cb)(struct uv_loop_s* loop,
struct uv__async* w,
unsigned int nevents);
struct uv__async {
uv__async_cb cb;
uv__io_t io_watcher;
int wfd;
};
struct uv__work {
void (*work)(struct uv__work *w);
void (*done)(struct uv__work *w, int status);
struct uv_loop_s* loop;
ngx_queue_t wq;
};
#ifndef UV_PLATFORM_SEM_T
# define UV_PLATFORM_SEM_T sem_t
#endif
#ifndef UV_PLATFORM_LOOP_FIELDS
# define UV_PLATFORM_LOOP_FIELDS /* empty */
#endif
#ifndef UV_PLATFORM_FS_EVENT_FIELDS
# define UV_PLATFORM_FS_EVENT_FIELDS /* empty */
#endif
#ifndef UV_STREAM_PRIVATE_PLATFORM_FIELDS
# define UV_STREAM_PRIVATE_PLATFORM_FIELDS /* empty */
#endif
/* Note: May be cast to struct iovec. See writev(2). */
typedef struct {
char* base;
size_t len;
} uv_buf_t;
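/* Illustrative sketch (not part of the original header): because the base/len
 * layout above matches struct iovec on these platforms, an array of uv_buf_t
 * can be handed to writev(2) with a cast.  The function name is hypothetical
 * and error handling is elided.
 */
#if 0
#include <sys/uio.h>

static ssize_t example_writev(int fd, uv_buf_t* bufs, int nbufs) {
  return writev(fd, (struct iovec*) bufs, nbufs);
}
#endif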
typedef int uv_file;
typedef int uv_os_sock_t;
typedef struct stat uv_statbuf_t;
#define UV_ONCE_INIT PTHREAD_ONCE_INIT
typedef pthread_once_t uv_once_t;
typedef pthread_t uv_thread_t;
typedef pthread_mutex_t uv_mutex_t;
typedef pthread_rwlock_t uv_rwlock_t;
typedef UV_PLATFORM_SEM_T uv_sem_t;
typedef pthread_cond_t uv_cond_t;
#if defined(__APPLE__) && defined(__MACH__)
typedef struct {
unsigned int n;
unsigned int count;
uv_mutex_t mutex;
uv_sem_t turnstile1;
uv_sem_t turnstile2;
} uv_barrier_t;
#else /* defined(__APPLE__) && defined(__MACH__) */
typedef pthread_barrier_t uv_barrier_t;
#endif /* defined(__APPLE__) && defined(__MACH__) */
/* Platform-specific definitions for uv_spawn support. */
typedef gid_t uv_gid_t;
typedef uid_t uv_uid_t;
/* Platform-specific definitions for uv_dlopen support. */
#define UV_DYNAMIC /* empty */
typedef struct {
void* handle;
char* errmsg;
} uv_lib_t;
#define UV_LOOP_PRIVATE_FIELDS \
unsigned long flags; \
int backend_fd; \
ngx_queue_t pending_queue; \
ngx_queue_t watcher_queue; \
uv__io_t** watchers; \
unsigned int nwatchers; \
unsigned int nfds; \
ngx_queue_t wq; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async; \
uv_handle_t* closing_handles; \
ngx_queue_t process_handles[1]; \
ngx_queue_t prepare_handles; \
ngx_queue_t check_handles; \
ngx_queue_t idle_handles; \
ngx_queue_t async_handles; \
struct uv__async async_watcher; \
/* RB_HEAD(uv__timers, uv_timer_s) */ \
struct uv__timers { \
struct uv_timer_s* rbh_root; \
} timer_handles; \
uint64_t time; \
int signal_pipefd[2]; \
uv__io_t signal_io_watcher; \
uv_signal_t child_watcher; \
int emfile_fd; \
uint64_t timer_counter; \
UV_PLATFORM_LOOP_FIELDS \

#define UV_REQ_TYPE_PRIVATE /* empty */
#define UV_REQ_PRIVATE_FIELDS /* empty */
#define UV_PRIVATE_REQ_TYPES /* empty */
#define UV_WRITE_PRIVATE_FIELDS \
ngx_queue_t queue; \
int write_index; \
uv_buf_t* bufs; \
int bufcnt; \
int error; \
uv_buf_t bufsml[4]; \

#define UV_CONNECT_PRIVATE_FIELDS \
ngx_queue_t queue; \

#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */
#define UV_UDP_SEND_PRIVATE_FIELDS \
ngx_queue_t queue; \
struct sockaddr_in6 addr; \
int bufcnt; \
uv_buf_t* bufs; \
ssize_t status; \
uv_udp_send_cb send_cb; \
uv_buf_t bufsml[4]; \

#define UV_HANDLE_PRIVATE_FIELDS \
int flags; \
uv_handle_t* next_closing; \

#define UV_STREAM_PRIVATE_FIELDS \
uv_connect_t *connect_req; \
uv_shutdown_t *shutdown_req; \
uv__io_t io_watcher; \
ngx_queue_t write_queue; \
ngx_queue_t write_completed_queue; \
uv_connection_cb connection_cb; \
int delayed_error; \
int accepted_fd; \
UV_STREAM_PRIVATE_PLATFORM_FIELDS \

#define UV_TCP_PRIVATE_FIELDS /* empty */
#define UV_UDP_PRIVATE_FIELDS \
uv_alloc_cb alloc_cb; \
uv_udp_recv_cb recv_cb; \
uv__io_t io_watcher; \
ngx_queue_t write_queue; \
ngx_queue_t write_completed_queue; \

#define UV_PIPE_PRIVATE_FIELDS \
const char* pipe_fname; /* strdup'ed */
#define UV_POLL_PRIVATE_FIELDS \
uv__io_t io_watcher;
#define UV_PREPARE_PRIVATE_FIELDS \
uv_prepare_cb prepare_cb; \
ngx_queue_t queue;
#define UV_CHECK_PRIVATE_FIELDS \
uv_check_cb check_cb; \
ngx_queue_t queue;
#define UV_IDLE_PRIVATE_FIELDS \
uv_idle_cb idle_cb; \
ngx_queue_t queue;
#define UV_ASYNC_PRIVATE_FIELDS \
uv_async_cb async_cb; \
ngx_queue_t queue; \
int pending; \

#define UV_TIMER_PRIVATE_FIELDS \
/* RB_ENTRY(uv_timer_s) tree_entry; */ \
struct { \
struct uv_timer_s* rbe_left; \
struct uv_timer_s* rbe_right; \
struct uv_timer_s* rbe_parent; \
int rbe_color; \
} tree_entry; \
uv_timer_cb timer_cb; \
uint64_t timeout; \
uint64_t repeat; \
uint64_t start_id;
#define UV_GETADDRINFO_PRIVATE_FIELDS \
struct uv__work work_req; \
uv_getaddrinfo_cb cb; \
struct addrinfo* hints; \
char* hostname; \
char* service; \
struct addrinfo* res; \
int retcode;
#define UV_PROCESS_PRIVATE_FIELDS \
ngx_queue_t queue; \
int errorno; \

#define UV_FS_PRIVATE_FIELDS \
const char *new_path; \
uv_file file; \
int flags; \
mode_t mode; \
void* buf; \
size_t len; \
off_t off; \
uv_uid_t uid; \
uv_gid_t gid; \
double atime; \
double mtime; \
struct uv__work work_req; \

#define UV_WORK_PRIVATE_FIELDS \
struct uv__work work_req;
#define UV_TTY_PRIVATE_FIELDS \
struct termios orig_termios; \
int mode;
#define UV_SIGNAL_PRIVATE_FIELDS \
/* RB_ENTRY(uv_signal_s) tree_entry; */ \
struct { \
struct uv_signal_s* rbe_left; \
struct uv_signal_s* rbe_right; \
struct uv_signal_s* rbe_parent; \
int rbe_color; \
} tree_entry; \
/* Use two counters here so we don't have to fiddle with atomics. */ \
unsigned int caught_signals; \
unsigned int dispatched_signals;
#define UV_FS_EVENT_PRIVATE_FIELDS \
uv_fs_event_cb cb; \
UV_PLATFORM_FS_EVENT_FIELDS \

#endif /* UV_UNIX_H */


@@ -1,585 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0502
#endif
#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
typedef intptr_t ssize_t;
# define _SSIZE_T_
# define _SSIZE_T_DEFINED
#endif
#include <winsock2.h>
#include <mswsock.h>
#include <ws2tcpip.h>
#include <windows.h>
#include <process.h>
#include <signal.h>
#include <sys/stat.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv-private/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "tree.h"
#include "ngx-queue.h"
#define MAX_PIPENAME_LEN 256
#ifndef S_IFLNK
# define S_IFLNK 0xA000
#endif
/* Additional signals supported by uv_signal and/or uv_kill. The CRT defines
* the following signals already:
*
* #define SIGINT 2
* #define SIGILL 4
* #define SIGABRT_COMPAT 6
* #define SIGFPE 8
* #define SIGSEGV 11
* #define SIGTERM 15
* #define SIGBREAK 21
* #define SIGABRT 22
*
* The additional signals have values that are common on other Unix
* variants (Linux and Darwin).
*/
#define SIGHUP 1
#define SIGKILL 9
#define SIGWINCH 28
/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many */
/* unix-like platforms. However MinGW doesn't define it, so we do. */
#ifndef SIGABRT_COMPAT
# define SIGABRT_COMPAT 6
#endif
/*
* GUIDs and typedefs for winsock extension functions.
* Mingw32 doesn't have these :-(
* (An illustrative runtime-lookup sketch follows the typedefs below.)
*/
#ifndef WSAID_ACCEPTEX
# define WSAID_ACCEPTEX \
{0xb5367df1, 0xcbac, 0x11cf, \
{0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
# define WSAID_CONNECTEX \
{0x25a207b9, 0xddf3, 0x4660, \
{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}
# define WSAID_GETACCEPTEXSOCKADDRS \
{0xb5367df2, 0xcbac, 0x11cf, \
{0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
# define WSAID_DISCONNECTEX \
{0x7fda2e11, 0x8630, 0x436f, \
{0xa0, 0x31, 0xf5, 0x36, 0xa6, 0xee, 0xc1, 0x57}}
# define WSAID_TRANSMITFILE \
{0xb5367df0, 0xcbac, 0x11cf, \
{0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
typedef BOOL PASCAL (*LPFN_ACCEPTEX)
(SOCKET sListenSocket,
SOCKET sAcceptSocket,
PVOID lpOutputBuffer,
DWORD dwReceiveDataLength,
DWORD dwLocalAddressLength,
DWORD dwRemoteAddressLength,
LPDWORD lpdwBytesReceived,
LPOVERLAPPED lpOverlapped);
typedef BOOL PASCAL (*LPFN_CONNECTEX)
(SOCKET s,
const struct sockaddr* name,
int namelen,
PVOID lpSendBuffer,
DWORD dwSendDataLength,
LPDWORD lpdwBytesSent,
LPOVERLAPPED lpOverlapped);
typedef void PASCAL (*LPFN_GETACCEPTEXSOCKADDRS)
(PVOID lpOutputBuffer,
DWORD dwReceiveDataLength,
DWORD dwLocalAddressLength,
DWORD dwRemoteAddressLength,
LPSOCKADDR* LocalSockaddr,
LPINT LocalSockaddrLength,
LPSOCKADDR* RemoteSockaddr,
LPINT RemoteSockaddrLength);
typedef BOOL PASCAL (*LPFN_DISCONNECTEX)
(SOCKET hSocket,
LPOVERLAPPED lpOverlapped,
DWORD dwFlags,
DWORD reserved);
typedef BOOL PASCAL (*LPFN_TRANSMITFILE)
(SOCKET hSocket,
HANDLE hFile,
DWORD nNumberOfBytesToWrite,
DWORD nNumberOfBytesPerSend,
LPOVERLAPPED lpOverlapped,
LPTRANSMIT_FILE_BUFFERS lpTransmitBuffers,
DWORD dwFlags);
typedef PVOID RTL_SRWLOCK;
typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
#endif
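/* Illustrative sketch (not part of the original header): the GUIDs above are
 * intended for WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER), which resolves
 * the extension functions at runtime.  The function name is hypothetical.
 */
#if 0
static LPFN_ACCEPTEX example_get_acceptex(SOCKET s) {
  LPFN_ACCEPTEX func = NULL;
  GUID guid = WSAID_ACCEPTEX;
  DWORD bytes;
  if (WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid),
               &func, sizeof(func), &bytes, NULL, NULL) != 0)
    return NULL;
  return func;
}
#endif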
typedef int (WSAAPI* LPFN_WSARECV)
(SOCKET socket,
LPWSABUF buffers,
DWORD buffer_count,
LPDWORD bytes,
LPDWORD flags,
LPWSAOVERLAPPED overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
typedef int (WSAAPI* LPFN_WSARECVFROM)
(SOCKET socket,
LPWSABUF buffers,
DWORD buffer_count,
LPDWORD bytes,
LPDWORD flags,
struct sockaddr* addr,
LPINT addr_len,
LPWSAOVERLAPPED overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
#ifndef _NTDEF_
typedef LONG NTSTATUS;
typedef NTSTATUS *PNTSTATUS;
#endif
#ifndef RTL_CONDITION_VARIABLE_INIT
typedef PVOID CONDITION_VARIABLE, *PCONDITION_VARIABLE;
#endif
typedef struct _AFD_POLL_HANDLE_INFO {
HANDLE Handle;
ULONG Events;
NTSTATUS Status;
} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;
typedef struct _AFD_POLL_INFO {
LARGE_INTEGER Timeout;
ULONG NumberOfHandles;
ULONG Exclusive;
AFD_POLL_HANDLE_INFO Handles[1];
} AFD_POLL_INFO, *PAFD_POLL_INFO;
#define UV_MSAFD_PROVIDER_COUNT 3
/**
* It should be possible to cast uv_buf_t[] to WSABUF[]
* see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx
*/
typedef struct uv_buf_t {
ULONG len;
char* base;
} uv_buf_t;
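/* Illustrative sketch (not part of the original header): since the len/base
 * order above matches WSABUF, an array of uv_buf_t can be passed straight to
 * Winsock scatter/gather calls.  The function name is hypothetical and error
 * handling is elided.
 */
#if 0
static int example_send(SOCKET s, uv_buf_t* bufs, DWORD nbufs) {
  DWORD bytes_sent;
  return WSASend(s, (WSABUF*) bufs, nbufs, &bytes_sent, 0, NULL, NULL);
}
#endif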
typedef int uv_file;
typedef struct _stati64 uv_statbuf_t;
typedef SOCKET uv_os_sock_t;
typedef HANDLE uv_thread_t;
typedef HANDLE uv_sem_t;
typedef CRITICAL_SECTION uv_mutex_t;
/* This condition variable implementation is based on the SetEvent solution
* (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
* We could not use the SignalObjectAndWait solution (section 3.4) because
* it wants the 2nd argument (type uv_mutex_t) of uv_cond_wait() and
* uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs.
*/
typedef union {
CONDITION_VARIABLE cond_var;
struct {
unsigned int waiters_count;
CRITICAL_SECTION waiters_count_lock;
HANDLE signal_event;
HANDLE broadcast_event;
} fallback;
} uv_cond_t;
typedef union {
/* srwlock_ has type SRWLOCK, but not all toolchains define this type in */
/* windows.h. */
SRWLOCK srwlock_;
struct {
uv_mutex_t read_mutex_;
uv_mutex_t write_mutex_;
unsigned int num_readers_;
} fallback_;
} uv_rwlock_t;
typedef struct {
unsigned int n;
unsigned int count;
uv_mutex_t mutex;
uv_sem_t turnstile1;
uv_sem_t turnstile2;
} uv_barrier_t;
#define UV_ONCE_INIT { 0, NULL }
typedef struct uv_once_s {
unsigned char ran;
HANDLE event;
} uv_once_t;
/* Platform-specific definitions for uv_spawn support. */
typedef unsigned char uv_uid_t;
typedef unsigned char uv_gid_t;
/* Platform-specific definitions for uv_dlopen support. */
#define UV_DYNAMIC FAR WINAPI
typedef struct {
HMODULE handle;
char* errmsg;
} uv_lib_t;
RB_HEAD(uv_timer_tree_s, uv_timer_s);
#define UV_LOOP_PRIVATE_FIELDS \
/* The loop's I/O completion port */ \
HANDLE iocp; \
/* The current time according to the event loop, in msecs. */ \
uint64_t time; \
/* Tail of a single-linked circular queue of pending reqs. If the queue */ \
/* is empty, tail_ is NULL. If there is only one item, */ \
/* tail_->next_req == tail_ */ \
uv_req_t* pending_reqs_tail; \
/* Head of a single-linked list of closed handles */ \
uv_handle_t* endgame_handles; \
/* The head of the timers tree */ \
struct uv_timer_tree_s timers; \
/* Lists of active loop (prepare / check / idle) watchers */ \
uv_prepare_t* prepare_handles; \
uv_check_t* check_handles; \
uv_idle_t* idle_handles; \
/* This pointer will refer to the prepare/check/idle handle whose */ \
/* callback is scheduled to be called next. This is needed to allow */ \
/* safe removal from one of the lists above while that list is being */ \
/* iterated over. */ \
uv_prepare_t* next_prepare_handle; \
uv_check_t* next_check_handle; \
uv_idle_t* next_idle_handle; \
/* This handle holds the peer sockets for the fast variant of uv_poll_t */ \
SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \
/* Counter to keep track of active tcp streams */ \
unsigned int active_tcp_streams; \
/* Counter to keep track of active udp streams */ \
unsigned int active_udp_streams; \
/* Counter used to assign start_id values to newly started timers */ \
uint64_t timer_counter;
#define UV_REQ_TYPE_PRIVATE \
/* TODO: remove the req suffix */ \
UV_ACCEPT, \
UV_FS_EVENT_REQ, \
UV_POLL_REQ, \
UV_PROCESS_EXIT, \
UV_READ, \
UV_UDP_RECV, \
UV_WAKEUP, \
UV_SIGNAL_REQ,
#define UV_REQ_PRIVATE_FIELDS \
union { \
/* Used by I/O operations */ \
struct { \
OVERLAPPED overlapped; \
size_t queued_bytes; \
}; \
}; \
struct uv_req_s* next_req;
#define UV_WRITE_PRIVATE_FIELDS \
int ipc_header; \
uv_buf_t write_buffer; \
HANDLE event_handle; \
HANDLE wait_handle;
#define UV_CONNECT_PRIVATE_FIELDS \
/* empty */
#define UV_SHUTDOWN_PRIVATE_FIELDS \
/* empty */
#define UV_UDP_SEND_PRIVATE_FIELDS \
/* empty */
#define UV_PRIVATE_REQ_TYPES \
typedef struct uv_pipe_accept_s { \
UV_REQ_FIELDS \
HANDLE pipeHandle; \
struct uv_pipe_accept_s* next_pending; \
} uv_pipe_accept_t; \
\
typedef struct uv_tcp_accept_s { \
UV_REQ_FIELDS \
SOCKET accept_socket; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32]; \
HANDLE event_handle; \
HANDLE wait_handle; \
struct uv_tcp_accept_s* next_pending; \
} uv_tcp_accept_t; \
\
typedef struct uv_read_s { \
UV_REQ_FIELDS \
HANDLE event_handle; \
HANDLE wait_handle; \
} uv_read_t;
#define uv_stream_connection_fields \
unsigned int write_reqs_pending; \
uv_shutdown_t* shutdown_req;
#define uv_stream_server_fields \
uv_connection_cb connection_cb;
#define UV_STREAM_PRIVATE_FIELDS \
unsigned int reqs_pending; \
int activecnt; \
uv_read_t read_req; \
union { \
struct { uv_stream_connection_fields }; \
struct { uv_stream_server_fields }; \
};
#define uv_tcp_server_fields \
uv_tcp_accept_t* accept_reqs; \
unsigned int processed_accepts; \
uv_tcp_accept_t* pending_accepts; \
LPFN_ACCEPTEX func_acceptex;
#define uv_tcp_connection_fields \
uv_buf_t read_buffer; \
LPFN_CONNECTEX func_connectex;
#define UV_TCP_PRIVATE_FIELDS \
SOCKET socket; \
int bind_error; \
union { \
struct { uv_tcp_server_fields }; \
struct { uv_tcp_connection_fields }; \
};
#define UV_UDP_PRIVATE_FIELDS \
SOCKET socket; \
unsigned int reqs_pending; \
int activecnt; \
uv_req_t recv_req; \
uv_buf_t recv_buffer; \
struct sockaddr_storage recv_from; \
int recv_from_len; \
uv_udp_recv_cb recv_cb; \
uv_alloc_cb alloc_cb; \
LPFN_WSARECV func_wsarecv; \
LPFN_WSARECVFROM func_wsarecvfrom;
#define uv_pipe_server_fields \
int pending_instances; \
uv_pipe_accept_t* accept_reqs; \
uv_pipe_accept_t* pending_accepts;
#define uv_pipe_connection_fields \
uv_timer_t* eof_timer; \
uv_write_t ipc_header_write_req; \
int ipc_pid; \
uint64_t remaining_ipc_rawdata_bytes; \
unsigned char reserved[sizeof(void*)]; \
struct { \
WSAPROTOCOL_INFOW* socket_info; \
int tcp_connection; \
} pending_ipc_info; \
uv_write_t* non_overlapped_writes_tail;
#define UV_PIPE_PRIVATE_FIELDS \
HANDLE handle; \
WCHAR* name; \
union { \
struct { uv_pipe_server_fields }; \
struct { uv_pipe_connection_fields }; \
};
/* TODO: put the parser states in a union - TTY handles are always */
/* half-duplex so read-state can safely overlap write-state. */
#define UV_TTY_PRIVATE_FIELDS \
HANDLE handle; \
union { \
struct { \
/* Used for readable TTY handles */ \
HANDLE read_line_handle; \
uv_buf_t read_line_buffer; \
HANDLE read_raw_wait; \
/* Fields used for translating win keystrokes into vt100 characters */ \
char last_key[8]; \
unsigned char last_key_offset; \
unsigned char last_key_len; \
WCHAR last_utf16_high_surrogate; \
INPUT_RECORD last_input_record; \
}; \
struct { \
/* Used for writable TTY handles */ \
/* utf8-to-utf16 conversion state */ \
unsigned int utf8_codepoint; \
unsigned char utf8_bytes_left; \
/* eol conversion state */ \
unsigned char previous_eol; \
/* ansi parser state */ \
unsigned char ansi_parser_state; \
unsigned char ansi_csi_argc; \
unsigned short ansi_csi_argv[4]; \
COORD saved_position; \
WORD saved_attributes; \
}; \
};
#define UV_POLL_PRIVATE_FIELDS \
SOCKET socket; \
/* Used in fast mode */ \
SOCKET peer_socket; \
AFD_POLL_INFO afd_poll_info_1; \
AFD_POLL_INFO afd_poll_info_2; \
/* Used in fast and slow mode. */ \
uv_req_t poll_req_1; \
uv_req_t poll_req_2; \
unsigned char submitted_events_1; \
unsigned char submitted_events_2; \
unsigned char mask_events_1; \
unsigned char mask_events_2; \
unsigned char events;
#define UV_TIMER_PRIVATE_FIELDS \
RB_ENTRY(uv_timer_s) tree_entry; \
uint64_t due; \
uint64_t repeat; \
uint64_t start_id; \
uv_timer_cb timer_cb;
#define UV_ASYNC_PRIVATE_FIELDS \
struct uv_req_s async_req; \
uv_async_cb async_cb; \
/* char to avoid alignment issues */ \
char volatile async_sent;
#define UV_PREPARE_PRIVATE_FIELDS \
uv_prepare_t* prepare_prev; \
uv_prepare_t* prepare_next; \
uv_prepare_cb prepare_cb;
#define UV_CHECK_PRIVATE_FIELDS \
uv_check_t* check_prev; \
uv_check_t* check_next; \
uv_check_cb check_cb;
#define UV_IDLE_PRIVATE_FIELDS \
uv_idle_t* idle_prev; \
uv_idle_t* idle_next; \
uv_idle_cb idle_cb;
#define UV_HANDLE_PRIVATE_FIELDS \
uv_handle_t* endgame_next; \
unsigned int flags;
#define UV_GETADDRINFO_PRIVATE_FIELDS \
uv_getaddrinfo_cb getaddrinfo_cb; \
void* alloc; \
WCHAR* node; \
WCHAR* service; \
struct addrinfoW* hints; \
struct addrinfoW* res; \
int retcode;
#define UV_PROCESS_PRIVATE_FIELDS \
struct uv_process_exit_s { \
UV_REQ_FIELDS \
} exit_req; \
BYTE* child_stdio_buffer; \
uv_err_t spawn_error; \
int exit_signal; \
HANDLE wait_handle; \
HANDLE process_handle; \
volatile char exit_cb_pending;
#define UV_FS_PRIVATE_FIELDS \
int flags; \
DWORD sys_errno_; \
union { \
/* TODO: remove me in 0.9. */ \
WCHAR* pathw; \
int fd; \
}; \
union { \
struct { \
int mode; \
WCHAR* new_pathw; \
int file_flags; \
int fd_out; \
void* buf; \
size_t length; \
int64_t offset; \
}; \
struct { \
double atime; \
double mtime; \
}; \
};
#define UV_WORK_PRIVATE_FIELDS \

#define UV_FS_EVENT_PRIVATE_FIELDS \
struct uv_fs_event_req_s { \
UV_REQ_FIELDS \
} req; \
HANDLE dir_handle; \
int req_pending; \
uv_fs_event_cb cb; \
WCHAR* filew; \
WCHAR* short_filew; \
WCHAR* dirw; \
char* buffer;
#define UV_SIGNAL_PRIVATE_FIELDS \
RB_ENTRY(uv_signal_s) tree_entry; \
struct uv_req_s signal_req; \
unsigned long pending_signum;
int uv_utf16_to_utf8(const WCHAR* utf16Buffer, size_t utf16Size,
char* utf8Buffer, size_t utf8Size);
int uv_utf8_to_utf16(const char* utf8Buffer, WCHAR* utf16Buffer,
size_t utf16Size);

File diff suppressed because it is too large


@@ -1,248 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
struct poll_ctx {
uv_fs_poll_t* parent_handle; /* NULL if parent has been stopped or closed */
int busy_polling;
unsigned int interval;
uint64_t start_time;
uv_loop_t* loop;
uv_fs_poll_cb poll_cb;
uv_timer_t timer_handle;
uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
uv_statbuf_t statbuf;
char path[1]; /* variable length */
};
static int statbuf_eq(const uv_statbuf_t* a, const uv_statbuf_t* b);
static void poll_cb(uv_fs_t* req);
static void timer_cb(uv_timer_t* timer, int status);
static void timer_close_cb(uv_handle_t* handle);
static uv_statbuf_t zero_statbuf;
int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
return 0;
}
int uv_fs_poll_start(uv_fs_poll_t* handle,
uv_fs_poll_cb cb,
const char* path,
unsigned int interval) {
struct poll_ctx* ctx;
uv_loop_t* loop;
size_t len;
if (uv__is_active(handle))
return 0;
loop = handle->loop;
len = strlen(path);
ctx = calloc(1, sizeof(*ctx) + len);
if (ctx == NULL)
return uv__set_artificial_error(loop, UV_ENOMEM);
ctx->loop = loop;
ctx->poll_cb = cb;
ctx->interval = interval ? interval : 1;
ctx->start_time = uv_now(loop);
ctx->parent_handle = handle;
memcpy(ctx->path, path, len + 1);
if (uv_timer_init(loop, &ctx->timer_handle))
abort();
ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
uv__handle_unref(&ctx->timer_handle);
if (uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb))
abort();
handle->poll_ctx = ctx;
uv__handle_start(handle);
return 0;
}
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
struct poll_ctx* ctx;
if (!uv__is_active(handle))
return 0;
ctx = handle->poll_ctx;
assert(ctx != NULL);
assert(ctx->parent_handle != NULL);
ctx->parent_handle = NULL;
handle->poll_ctx = NULL;
/* Close the timer if it's active. If it's inactive, there's a stat request
* in progress and poll_cb will take care of the cleanup.
*/
if (uv__is_active(&ctx->timer_handle))
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv__handle_stop(handle);
return 0;
}
void uv__fs_poll_close(uv_fs_poll_t* handle) {
uv_fs_poll_stop(handle);
}
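/* Illustrative sketch (not part of the original source): how the uv_fs_poll
 * API implemented in this file is typically consumed.  The callback signature
 * is inferred from the poll_cb calls below; callback, function, and path
 * names are hypothetical and error handling is elided.
 */
#if 0
static void example_poll_cb(uv_fs_poll_t* handle,
                            int status,
                            const uv_statbuf_t* prev,
                            const uv_statbuf_t* curr) {
  /* status < 0: the stat() failed; otherwise prev/curr describe the change. */
}

static void example(uv_loop_t* loop) {
  static uv_fs_poll_t handle;
  uv_fs_poll_init(loop, &handle);
  uv_fs_poll_start(&handle, example_poll_cb, "/tmp/example.log", 2000);
}
#endif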
static void timer_cb(uv_timer_t* timer, int status) {
struct poll_ctx* ctx;
ctx = container_of(timer, struct poll_ctx, timer_handle);
assert(ctx->parent_handle != NULL);
assert(ctx->parent_handle->poll_ctx == ctx);
ctx->start_time = uv_now(ctx->loop);
if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
abort();
}
static void poll_cb(uv_fs_t* req) {
uv_statbuf_t* statbuf;
struct poll_ctx* ctx;
uint64_t interval;
ctx = container_of(req, struct poll_ctx, fs_req);
if (ctx->parent_handle == NULL) { /* handle has been stopped or closed */
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv_fs_req_cleanup(req);
return;
}
if (req->result != 0) {
if (ctx->busy_polling != -req->errorno) {
uv__set_artificial_error(ctx->loop, req->errorno);
ctx->poll_cb(ctx->parent_handle, -1, &ctx->statbuf, &zero_statbuf);
ctx->busy_polling = -req->errorno;
}
goto out;
}
statbuf = &req->statbuf;
if (ctx->busy_polling != 0)
if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
ctx->statbuf = *statbuf;
ctx->busy_polling = 1;
out:
uv_fs_req_cleanup(req);
if (ctx->parent_handle == NULL) { /* handle has been stopped by callback */
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
return;
}
/* Reschedule timer, subtract the delay from doing the stat(). */
interval = ctx->interval;
interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;
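/* E.g. with a 1000 ms interval and a stat() that took 250 ms, the timer is
 * armed for 750 ms so the polling cadence stays aligned with start_time; the
 * modulo keeps the computed delay positive even if the stat() overran one or
 * more whole intervals.
 */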
if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
abort();
}
static void timer_close_cb(uv_handle_t* handle) {
free(container_of(handle, struct poll_ctx, timer_handle));
}
static int statbuf_eq(const uv_statbuf_t* a, const uv_statbuf_t* b) {
#if defined(_WIN32)
return a->st_mtime == b->st_mtime
&& a->st_size == b->st_size
&& a->st_mode == b->st_mode;
#else
/* Jump through a few hoops to get sub-second granularity on Linux. */
# if defined(__linux__)
# if defined(__USE_MISC) /* _BSD_SOURCE || _SVID_SOURCE */
if (a->st_ctim.tv_nsec != b->st_ctim.tv_nsec) return 0;
if (a->st_mtim.tv_nsec != b->st_mtim.tv_nsec) return 0;
# else
if (a->st_ctimensec != b->st_ctimensec) return 0;
if (a->st_mtimensec != b->st_mtimensec) return 0;
# endif
# endif
/* Jump through different hoops on OS X. */
# if defined(__APPLE__)
# if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
if (a->st_ctimespec.tv_nsec != b->st_ctimespec.tv_nsec) return 0;
if (a->st_mtimespec.tv_nsec != b->st_mtimespec.tv_nsec) return 0;
# else
if (a->st_ctimensec != b->st_ctimensec) return 0;
if (a->st_mtimensec != b->st_mtimensec) return 0;
# endif
# endif
/* TODO(bnoordhuis) Other Unices have st_ctim and friends too, provided
* the stars and compiler flags are right...
*/
return a->st_ctime == b->st_ctime
&& a->st_mtime == b->st_mtime
&& a->st_size == b->st_size
&& a->st_mode == b->st_mode
&& a->st_uid == b->st_uid
&& a->st_gid == b->st_gid
&& a->st_ino == b->st_ino
&& a->st_dev == b->st_dev;
#endif
}
#if defined(_WIN32)
#include "win/internal.h"
#include "win/handle-inl.h"
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
assert(handle->flags & UV__HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
#endif /* _WIN32 */

View File

@ -1,298 +0,0 @@
/*
* Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
* Copyright (c) 1996-1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <stdio.h>
#include <string.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv-private/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv.h"
#include "uv-common.h"
static const uv_err_t uv_eafnosupport_ = { UV_EAFNOSUPPORT, 0 };
static const uv_err_t uv_enospc_ = { UV_ENOSPC, 0 };
static const uv_err_t uv_einval_ = { UV_EINVAL, 0 };
static uv_err_t inet_ntop4(const unsigned char *src, char *dst, size_t size);
static uv_err_t inet_ntop6(const unsigned char *src, char *dst, size_t size);
static uv_err_t inet_pton4(const char *src, unsigned char *dst);
static uv_err_t inet_pton6(const char *src, unsigned char *dst);
uv_err_t uv_inet_ntop(int af, const void* src, char* dst, size_t size) {
switch (af) {
case AF_INET:
return (inet_ntop4(src, dst, size));
case AF_INET6:
return (inet_ntop6(src, dst, size));
default:
return uv_eafnosupport_;
}
/* NOTREACHED */
}
static uv_err_t inet_ntop4(const unsigned char *src, char *dst, size_t size) {
static const char fmt[] = "%u.%u.%u.%u";
char tmp[sizeof "255.255.255.255"];
int l;
#ifndef _WIN32
l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
#else
l = _snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
#endif
if (l <= 0 || (size_t) l >= size) {
return uv_enospc_;
}
strncpy(dst, tmp, size);
dst[size - 1] = '\0';
return uv_ok_;
}
static uv_err_t inet_ntop6(const unsigned char *src, char *dst, size_t size) {
/*
* Note that int32_t and int16_t need only be "at least" large enough
* to contain a value of the specified size. On some systems, like
* Crays, there is no such thing as an integer variable with 16 bits.
* Keep this in mind if you think this function should have been coded
* to use pointer overlays. All the world's not a VAX.
*/
char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp;
struct { int base, len; } best, cur;
unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)];
int i;
/*
* Preprocess:
* Copy the input (bytewise) array into a wordwise array.
* Find the longest run of 0x00's in src[] for :: shorthanding.
*/
memset(words, '\0', sizeof words);
for (i = 0; i < (int) sizeof(struct in6_addr); i++)
words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
best.base = -1;
best.len = 0;
cur.base = -1;
cur.len = 0;
for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
if (words[i] == 0) {
if (cur.base == -1)
cur.base = i, cur.len = 1;
else
cur.len++;
} else {
if (cur.base != -1) {
if (best.base == -1 || cur.len > best.len)
best = cur;
cur.base = -1;
}
}
}
if (cur.base != -1) {
if (best.base == -1 || cur.len > best.len)
best = cur;
}
if (best.base != -1 && best.len < 2)
best.base = -1;
/*
* Format the result.
*/
tp = tmp;
for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
/* Are we inside the best run of 0x00's? */
if (best.base != -1 && i >= best.base &&
i < (best.base + best.len)) {
if (i == best.base)
*tp++ = ':';
continue;
}
/* Are we following an initial run of 0x00s or any real hex? */
if (i != 0)
*tp++ = ':';
/* Is this address an encapsulated IPv4? */
if (i == 6 && best.base == 0 && (best.len == 6 ||
(best.len == 7 && words[7] != 0x0001) ||
(best.len == 5 && words[5] == 0xffff))) {
uv_err_t err = inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp));
if (err.code != UV_OK)
return err;
tp += strlen(tp);
break;
}
tp += snprintf(tp, tmp + sizeof tmp - tp, "%x", words[i]);
}
/* Was it a trailing run of 0x00's? */
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
*tp++ = ':';
*tp++ = '\0';
/*
* Check for overflow, copy, and we're done.
*/
if ((size_t)(tp - tmp) > size) {
return uv_enospc_;
}
uv_strlcpy(dst, tmp, size);
return uv_ok_;
}
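/* Worked example of the zero-run compression above (addresses assumed):
 * 2001:0db8:0000:0000:0000:0000:0000:0001 has its longest run of zero words
 * at indices 2..6, so it is rendered as "2001:db8::1". An IPv4-mapped
 * address such as ::ffff:192.0.2.1 takes the i == 6 branch and appends the
 * dotted quad via inet_ntop4().
 */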
uv_err_t uv_inet_pton(int af, const char* src, void* dst) {
switch (af) {
case AF_INET:
return (inet_pton4(src, dst));
case AF_INET6:
return (inet_pton6(src, dst));
default:
return uv_eafnosupport_;
}
/* NOTREACHED */
}
static uv_err_t inet_pton4(const char *src, unsigned char *dst) {
static const char digits[] = "0123456789";
int saw_digit, octets, ch;
unsigned char tmp[sizeof(struct in_addr)], *tp;
saw_digit = 0;
octets = 0;
*(tp = tmp) = 0;
while ((ch = *src++) != '\0') {
const char *pch;
if ((pch = strchr(digits, ch)) != NULL) {
unsigned int nw = *tp * 10 + (pch - digits);
if (saw_digit && *tp == 0)
return uv_einval_;
if (nw > 255)
return uv_einval_;
*tp = nw;
if (!saw_digit) {
if (++octets > 4)
return uv_einval_;
saw_digit = 1;
}
} else if (ch == '.' && saw_digit) {
if (octets == 4)
return uv_einval_;
*++tp = 0;
saw_digit = 0;
} else
return uv_einval_;
}
if (octets < 4)
return uv_einval_;
memcpy(dst, tmp, sizeof(struct in_addr));
return uv_ok_;
}
static uv_err_t inet_pton6(const char *src, unsigned char *dst) {
static const char xdigits_l[] = "0123456789abcdef",
xdigits_u[] = "0123456789ABCDEF";
unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp;
const char *xdigits, *curtok;
int ch, seen_xdigits;
unsigned int val;
memset((tp = tmp), '\0', sizeof tmp);
endp = tp + sizeof tmp;
colonp = NULL;
/* Leading :: requires some special handling. */
if (*src == ':')
if (*++src != ':')
return uv_einval_;
curtok = src;
seen_xdigits = 0;
val = 0;
while ((ch = *src++) != '\0') {
const char *pch;
if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
pch = strchr((xdigits = xdigits_u), ch);
if (pch != NULL) {
val <<= 4;
val |= (pch - xdigits);
if (++seen_xdigits > 4)
return uv_einval_;
continue;
}
if (ch == ':') {
curtok = src;
if (!seen_xdigits) {
if (colonp)
return uv_einval_;
colonp = tp;
continue;
} else if (*src == '\0') {
return uv_einval_;
}
if (tp + sizeof(uint16_t) > endp)
return uv_einval_;
*tp++ = (unsigned char) (val >> 8) & 0xff;
*tp++ = (unsigned char) val & 0xff;
seen_xdigits = 0;
val = 0;
continue;
}
if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) {
uv_err_t err = inet_pton4(curtok, tp);
if (err.code == 0) {
tp += sizeof(struct in_addr);
seen_xdigits = 0;
break; /*%< '\\0' was seen by inet_pton4(). */
}
}
return uv_einval_;
}
if (seen_xdigits) {
if (tp + sizeof(uint16_t) > endp)
return uv_einval_;
*tp++ = (unsigned char) (val >> 8) & 0xff;
*tp++ = (unsigned char) val & 0xff;
}
if (colonp != NULL) {
/*
* Since some memmove()'s erroneously fail to handle
* overlapping regions, we'll do the shift by hand.
*/
const int n = tp - colonp;
int i;
if (tp == endp)
return uv_einval_;
for (i = 1; i <= n; i++) {
endp[- i] = colonp[n - i];
colonp[n - i] = 0;
}
tp = endp;
}
if (tp != endp)
return uv_einval_;
memcpy(dst, tmp, sizeof tmp);
return uv_ok_;
}
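/* A round-trip sketch (not part of the original file): parse a dotted-quad
 * string with uv_inet_pton() and format it back with uv_inet_ntop(), using
 * only the uv_err_t.code field visible above. The example address is
 * arbitrary.
 */
static void example_inet_round_trip(void) {
  struct in_addr addr;
  char text[sizeof "255.255.255.255"];
  uv_err_t err;

  err = uv_inet_pton(AF_INET, "192.0.2.1", &addr);
  if (err.code != UV_OK)
    return;

  err = uv_inet_ntop(AF_INET, &addr, text, sizeof(text));
  if (err.code != UV_OK)
    return;

  /* text now holds "192.0.2.1". */
}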

View File

@ -1,391 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <utmp.h>
#include <sys/protosw.h>
#include <libperfstat.h>
#include <sys/proc.h>
#include <sys/procfs.h>
uint64_t uv__hrtime(void) {
uint64_t G = 1000000000;
timebasestruct_t t;
read_wall_time(&t, TIMEBASE_SZ);
time_base_to_time(&t, TIMEBASE_SZ);
return (uint64_t) t.tb_high * G + t.tb_low;
}
/*
* We could use a static buffer for the path manipulations that we need outside
* of the function, but this function could be called by multiple consumers and
* we don't want to potentially create a race condition in the use of snprintf.
*/
int uv_exepath(char* buffer, size_t* size) {
ssize_t res;
char pp[64], cwdl[PATH_MAX];
struct psinfo ps;
int fd;
if (buffer == NULL)
return (-1);
if (size == NULL)
return (-1);
(void) snprintf(pp, sizeof(pp), "/proc/%lu/cwd", (unsigned long) getpid());
res = readlink(pp, cwdl, sizeof(cwdl) - 1);
if (res < 0)
return res;
cwdl[res] = '\0';
(void) snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
fd = open(pp, O_RDONLY);
if (fd < 0)
return fd;
res = read(fd, &ps, sizeof(ps));
close(fd);
if (res < 0)
return res;
(void) snprintf(buffer, *size, "%s%s", cwdl, ps.pr_fname);
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
perfstat_memory_total_t mem_total;
int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
if (result == -1) {
return 0;
}
return mem_total.real_free * 4096;
}
uint64_t uv_get_total_memory(void) {
perfstat_memory_total_t mem_total;
int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
if (result == -1) {
return 0;
}
return mem_total.real_total * 4096;
}
void uv_loadavg(double avg[3]) {
perfstat_cpu_total_t ps_total;
int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
if (result == -1) {
avg[0] = 0.; avg[1] = 0.; avg[2] = 0.;
return;
}
avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS);
avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS);
avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS);
}
int uv_fs_event_init(uv_loop_t* loop,
uv_fs_event_t* handle,
const char* filename,
uv_fs_event_cb cb,
int flags) {
loop->counters.fs_event_init++;
uv__set_sys_error(loop, ENOSYS);
return -1;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
UNREACHABLE();
}
char** uv_setup_args(int argc, char** argv) {
return argv;
}
uv_err_t uv_set_process_title(const char* title) {
return uv_ok_;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
if (size > 0) {
buffer[0] = '\0';
}
return uv_ok_;
}
uv_err_t uv_resident_set_memory(size_t* rss) {
char pp[64];
psinfo_t psinfo;
uv_err_t err;
int fd;
(void) snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
fd = open(pp, O_RDONLY);
if (fd == -1)
return uv__new_sys_error(errno);
err = uv_ok_;
if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo))
*rss = (size_t)psinfo.pr_rssize * 1024;
else
err = uv__new_sys_error(EINVAL);
close(fd);
return err;
}
uv_err_t uv_uptime(double* uptime) {
struct utmp *utmp_buf;
size_t entries = 0;
  time_t boot_time = 0;  /* Stays 0 if no BOOT_TIME record is found below. */
utmpname(UTMP_FILE);
setutent();
while ((utmp_buf = getutent()) != NULL) {
if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS)
++entries;
if (utmp_buf->ut_type == BOOT_TIME)
boot_time = utmp_buf->ut_time;
}
endutent();
if (boot_time == 0)
return uv__new_artificial_error(UV_ENOSYS);
*uptime = time(NULL) - boot_time;
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
uv_cpu_info_t* cpu_info;
perfstat_cpu_total_t ps_total;
perfstat_cpu_t* ps_cpus;
perfstat_id_t cpu_id;
int result, ncpus, idx = 0;
result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
if (result == -1) {
return uv__new_artificial_error(UV_ENOSYS);
}
ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
if (result == -1) {
return uv__new_artificial_error(UV_ENOSYS);
}
ps_cpus = (perfstat_cpu_t*) malloc(ncpus * sizeof(perfstat_cpu_t));
if (!ps_cpus) {
return uv__new_artificial_error(UV_ENOMEM);
}
strcpy(cpu_id.name, FIRST_CPU);
result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
if (result == -1) {
free(ps_cpus);
return uv__new_artificial_error(UV_ENOSYS);
}
*cpu_infos = (uv_cpu_info_t*) malloc(ncpus * sizeof(uv_cpu_info_t));
if (!*cpu_infos) {
free(ps_cpus);
return uv__new_artificial_error(UV_ENOMEM);
}
*count = ncpus;
cpu_info = *cpu_infos;
while (idx < ncpus) {
cpu_info->speed = (int)(ps_total.processorHZ / 1000000);
cpu_info->model = strdup(ps_total.description);
cpu_info->cpu_times.user = ps_cpus[idx].user;
cpu_info->cpu_times.sys = ps_cpus[idx].sys;
cpu_info->cpu_times.idle = ps_cpus[idx].idle;
cpu_info->cpu_times.irq = ps_cpus[idx].wait;
cpu_info->cpu_times.nice = 0;
cpu_info++;
idx++;
}
free(ps_cpus);
return uv_ok_;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; ++i) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
uv_interface_address_t* address;
int sockfd, size = 1;
struct ifconf ifc;
struct ifreq *ifr, *p, flg;
*count = 0;
if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) {
return uv__new_artificial_error(UV_ENOSYS);
}
if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) {
close(sockfd);
return uv__new_artificial_error(UV_ENOSYS);
}
ifc.ifc_req = (struct ifreq*)malloc(size);
ifc.ifc_len = size;
if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
close(sockfd);
return uv__new_artificial_error(UV_ENOSYS);
}
#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
/* Count all up and running ipv4/ipv6 addresses */
ifr = ifc.ifc_req;
while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
p = ifr;
ifr = (struct ifreq*)
((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
if (!(p->ifr_addr.sa_family == AF_INET6 ||
p->ifr_addr.sa_family == AF_INET))
continue;
memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
close(sockfd);
return uv__new_artificial_error(UV_ENOSYS);
}
if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
continue;
(*count)++;
}
/* Alloc the return interface structs */
*addresses = (uv_interface_address_t*)
malloc(*count * sizeof(uv_interface_address_t));
if (!(*addresses)) {
close(sockfd);
return uv__new_artificial_error(UV_ENOMEM);
}
address = *addresses;
ifr = ifc.ifc_req;
while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
p = ifr;
ifr = (struct ifreq*)
((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
if (!(p->ifr_addr.sa_family == AF_INET6 ||
p->ifr_addr.sa_family == AF_INET))
continue;
memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
close(sockfd);
return uv__new_artificial_error(UV_ENOSYS);
}
if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
continue;
/* All conditions above must match count loop */
address->name = strdup(p->ifr_name);
if (p->ifr_addr.sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6 *)&p->ifr_addr);
} else {
address->address.address4 = *((struct sockaddr_in *)&p->ifr_addr);
}
address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
address++;
}
#undef ADDR_SIZE
close(sockfd);
return uv_ok_;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; ++i) {
free(addresses[i].name);
}
free(addresses);
}

View File

@ -1,281 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* This file contains both the uv__async internal infrastructure and the
* user-facing uv_async_t functions.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static void uv__async_event(uv_loop_t* loop,
struct uv__async* w,
unsigned int nevents);
static int uv__async_make_pending(int* pending);
static int uv__async_eventfd(void);
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
if (uv__async_start(loop, &loop->async_watcher, uv__async_event))
return uv__set_sys_error(loop, errno);
uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
handle->async_cb = async_cb;
handle->pending = 0;
ngx_queue_insert_tail(&loop->async_handles, &handle->queue);
uv__handle_start(handle);
return 0;
}
int uv_async_send(uv_async_t* handle) {
if (uv__async_make_pending(&handle->pending) == 0)
uv__async_send(&handle->loop->async_watcher);
return 0;
}
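/* A minimal usage sketch (not part of the original file): the handle is
 * initialized on the loop thread, after which uv_async_send() may be called
 * from any thread to wake the loop. The callback signature is inferred from
 * the h->async_cb(h, 0) call in uv__async_event() below; example_wakeup() is
 * a hypothetical callback.
 */
static void example_wakeup(uv_async_t* handle, int status) {
  /* Runs on the loop thread; several uv_async_send() calls may be coalesced
   * into a single invocation, as the pending flag below suggests. */
}

static void example_async_setup(uv_loop_t* loop, uv_async_t* handle) {
  if (uv_async_init(loop, handle, example_wakeup))
    return;
  uv_async_send(handle);  /* Typically called from another thread. */
}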
void uv__async_close(uv_async_t* handle) {
ngx_queue_remove(&handle->queue);
uv__handle_stop(handle);
}
static void uv__async_event(uv_loop_t* loop,
struct uv__async* w,
unsigned int nevents) {
ngx_queue_t* q;
uv_async_t* h;
ngx_queue_foreach(q, &loop->async_handles) {
h = ngx_queue_data(q, uv_async_t, queue);
if (!h->pending) continue;
h->pending = 0;
h->async_cb(h, 0);
}
}
static int uv__async_make_pending(int* pending) {
/* Do a cheap read first. */
if (ACCESS_ONCE(int, *pending) != 0)
return 1;
/* Micro-optimization: use atomic memory operations to detect if we've been
* preempted by another thread and don't have to make an expensive syscall.
* This speeds up the heavily contended case by about 1-2% and has little
* if any impact on the non-contended case.
*
* Use XCHG instead of the CMPXCHG that __sync_val_compare_and_swap() emits
* on x86, it's about 4x faster. It probably makes zero difference in the
* grand scheme of things but I'm OCD enough not to let this one pass.
*/
#if defined(__i386__) || defined(__x86_64__)
{
unsigned int val = 1;
__asm__ __volatile__ ("xchgl %0, %1"
: "+r" (val)
: "m" (*pending));
return val != 0;
}
#elif defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ > 0)
return __sync_val_compare_and_swap(pending, 0, 1) != 0;
#else
ACCESS_ONCE(int, *pending) = 1;
return 0;
#endif
}
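/* A portable sketch of the same "wake at most once" idea (not part of the
 * original file), assuming a GCC-style toolchain: __sync_lock_test_and_set()
 * is an atomic exchange, so only the caller that flips *pending from 0 to 1
 * reports that a wakeup is required, mirroring the hand-written XCHG above
 * without inline assembly.
 */
static int example_async_make_pending(int* pending) {
  return __sync_lock_test_and_set(pending, 1) != 0;
}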
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
struct uv__async* wa;
char buf[1024];
unsigned n;
ssize_t r;
n = 0;
for (;;) {
r = read(w->fd, buf, sizeof(buf));
if (r > 0)
n += r;
if (r == sizeof(buf))
continue;
if (r != -1)
break;
if (errno == EAGAIN || errno == EWOULDBLOCK)
break;
if (errno == EINTR)
continue;
abort();
}
wa = container_of(w, struct uv__async, io_watcher);
#if defined(__linux__)
if (wa->wfd == -1) {
uint64_t val;
assert(n == sizeof(val));
memcpy(&val, buf, sizeof(val)); /* Avoid alignment issues. */
wa->cb(loop, wa, val);
return;
}
#endif
wa->cb(loop, wa, n);
}
void uv__async_send(struct uv__async* wa) {
const void* buf;
ssize_t len;
int fd;
int r;
buf = "";
len = 1;
fd = wa->wfd;
#if defined(__linux__)
if (fd == -1) {
static const uint64_t val = 1;
buf = &val;
len = sizeof(val);
fd = wa->io_watcher.fd; /* eventfd */
}
#endif
do
r = write(fd, buf, len);
while (r == -1 && errno == EINTR);
if (r == len)
return;
if (r == -1)
if (errno == EAGAIN || errno == EWOULDBLOCK)
return;
abort();
}
void uv__async_init(struct uv__async* wa) {
wa->io_watcher.fd = -1;
wa->wfd = -1;
}
int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb) {
int pipefd[2];
int fd;
if (wa->io_watcher.fd != -1)
return 0;
fd = uv__async_eventfd();
if (fd >= 0) {
pipefd[0] = fd;
pipefd[1] = -1;
}
else if (fd != -ENOSYS)
return -1;
else if (uv__make_pipe(pipefd, UV__F_NONBLOCK))
return -1;
uv__io_init(&wa->io_watcher, uv__async_io, pipefd[0]);
uv__io_start(loop, &wa->io_watcher, UV__POLLIN);
wa->wfd = pipefd[1];
wa->cb = cb;
return 0;
}
void uv__async_stop(uv_loop_t* loop, struct uv__async* wa) {
if (wa->io_watcher.fd == -1)
return;
uv__io_stop(loop, &wa->io_watcher, UV__POLLIN);
close(wa->io_watcher.fd);
wa->io_watcher.fd = -1;
if (wa->wfd != -1) {
close(wa->wfd);
wa->wfd = -1;
}
}
static int uv__async_eventfd() {
#if defined(__linux__)
static int no_eventfd2;
static int no_eventfd;
int fd;
if (no_eventfd2)
goto skip_eventfd2;
fd = uv__eventfd2(0, UV__EFD_CLOEXEC | UV__EFD_NONBLOCK);
if (fd != -1)
return fd;
if (errno != ENOSYS)
return -errno;
no_eventfd2 = 1;
skip_eventfd2:
if (no_eventfd)
goto skip_eventfd;
fd = uv__eventfd(0);
if (fd != -1) {
uv__cloexec(fd, 1);
uv__nonblock(fd, 1);
return fd;
}
if (errno != ENOSYS)
return -errno;
no_eventfd = 1;
skip_eventfd:
#endif
return -ENOSYS;
}

View File

@ -1,732 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX */
#include <sys/uio.h> /* writev */
#ifdef __linux__
# include <sys/ioctl.h>
#endif
#ifdef __sun
# include <sys/types.h>
# include <sys/wait.h>
#endif
#ifdef __APPLE__
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# include <sys/ioctl.h>
#endif
#ifdef __FreeBSD__
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/ioctl.h>
# include <sys/wait.h>
#endif
static void uv__run_pending(uv_loop_t* loop);
static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
uint64_t uv_hrtime(void) {
return uv__hrtime();
}
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
handle->flags |= UV_CLOSING;
handle->close_cb = close_cb;
switch (handle->type) {
case UV_NAMED_PIPE:
uv__pipe_close((uv_pipe_t*)handle);
break;
case UV_TTY:
uv__stream_close((uv_stream_t*)handle);
break;
case UV_TCP:
uv__tcp_close((uv_tcp_t*)handle);
break;
case UV_UDP:
uv__udp_close((uv_udp_t*)handle);
break;
case UV_PREPARE:
uv__prepare_close((uv_prepare_t*)handle);
break;
case UV_CHECK:
uv__check_close((uv_check_t*)handle);
break;
case UV_IDLE:
uv__idle_close((uv_idle_t*)handle);
break;
case UV_ASYNC:
uv__async_close((uv_async_t*)handle);
break;
case UV_TIMER:
uv__timer_close((uv_timer_t*)handle);
break;
case UV_PROCESS:
uv__process_close((uv_process_t*)handle);
break;
case UV_FS_EVENT:
uv__fs_event_close((uv_fs_event_t*)handle);
break;
case UV_POLL:
uv__poll_close((uv_poll_t*)handle);
break;
case UV_FS_POLL:
uv__fs_poll_close((uv_fs_poll_t*)handle);
break;
case UV_SIGNAL:
uv__signal_close((uv_signal_t*) handle);
      /* Signal handles may not be closed immediately. The signal code will */
      /* itself call uv__make_close_pending() whenever appropriate. */
return;
default:
assert(0);
}
uv__make_close_pending(handle);
}
void uv__make_close_pending(uv_handle_t* handle) {
assert(handle->flags & UV_CLOSING);
assert(!(handle->flags & UV_CLOSED));
handle->next_closing = handle->loop->closing_handles;
handle->loop->closing_handles = handle;
}
static void uv__finish_close(uv_handle_t* handle) {
/* Note: while the handle is in the UV_CLOSING state now, it's still possible
* for it to be active in the sense that uv__is_active() returns true.
* A good example is when the user calls uv_shutdown(), immediately followed
* by uv_close(). The handle is considered active at this point because the
* completion of the shutdown req is still pending.
*/
assert(handle->flags & UV_CLOSING);
assert(!(handle->flags & UV_CLOSED));
handle->flags |= UV_CLOSED;
switch (handle->type) {
case UV_PREPARE:
case UV_CHECK:
case UV_IDLE:
case UV_ASYNC:
case UV_TIMER:
case UV_PROCESS:
case UV_FS_EVENT:
case UV_FS_POLL:
case UV_POLL:
case UV_SIGNAL:
break;
case UV_NAMED_PIPE:
case UV_TCP:
case UV_TTY:
uv__stream_destroy((uv_stream_t*)handle);
break;
case UV_UDP:
uv__udp_finish_close((uv_udp_t*)handle);
break;
default:
assert(0);
break;
}
uv__handle_unref(handle);
ngx_queue_remove(&handle->handle_queue);
if (handle->close_cb) {
handle->close_cb(handle);
}
}
static void uv__run_closing_handles(uv_loop_t* loop) {
uv_handle_t* p;
uv_handle_t* q;
p = loop->closing_handles;
loop->closing_handles = NULL;
while (p) {
q = p->next_closing;
uv__finish_close(p);
p = q;
}
}
int uv_is_closing(const uv_handle_t* handle) {
return uv__is_closing(handle);
}
uv_loop_t* uv_default_loop(void) {
if (default_loop_ptr)
return default_loop_ptr;
if (uv__loop_init(&default_loop_struct, /* default_loop? */ 1))
return NULL;
return (default_loop_ptr = &default_loop_struct);
}
uv_loop_t* uv_loop_new(void) {
uv_loop_t* loop;
if ((loop = malloc(sizeof(*loop))) == NULL)
return NULL;
if (uv__loop_init(loop, /* default_loop? */ 0)) {
free(loop);
return NULL;
}
return loop;
}
void uv_loop_delete(uv_loop_t* loop) {
uv__loop_delete(loop);
#ifndef NDEBUG
memset(loop, -1, sizeof *loop);
#endif
if (loop == default_loop_ptr)
default_loop_ptr = NULL;
else
free(loop);
}
int uv_backend_fd(const uv_loop_t* loop) {
return loop->backend_fd;
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (!ngx_queue_empty(&loop->idle_handles))
return 0;
if (loop->closing_handles)
return 0;
return uv__next_timeout(loop);
}
static int uv__loop_alive(uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
loop->closing_handles != NULL;
}
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
int timeout;
int r;
r = uv__loop_alive(loop);
while (r != 0 && loop->stop_flag == 0) {
UV_TICK_START(loop, mode);
uv__update_time(loop);
uv__run_timers(loop);
uv__run_idle(loop);
uv__run_prepare(loop);
uv__run_pending(loop);
timeout = 0;
if ((mode & UV_RUN_NOWAIT) == 0)
timeout = uv_backend_timeout(loop);
uv__io_poll(loop, timeout);
uv__run_check(loop);
uv__run_closing_handles(loop);
r = uv__loop_alive(loop);
UV_TICK_STOP(loop, mode);
if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
break;
}
/* The if statement lets gcc compile it to a conditional store. Avoids
* dirtying a cache line.
*/
if (loop->stop_flag != 0)
loop->stop_flag = 0;
return r;
}
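/* A minimal sketch (not part of the original file) of driving the loop with
 * the functions above: obtain the default loop and run it until no active
 * handles or requests remain.
 */
static void example_run_default_loop(void) {
  uv_loop_t* loop = uv_default_loop();
  if (loop == NULL)
    return;
  uv_run(loop, UV_RUN_DEFAULT);  /* Returns once uv__loop_alive() is false. */
}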
void uv_update_time(uv_loop_t* loop) {
uv__update_time(loop);
}
int uv_is_active(const uv_handle_t* handle) {
return uv__is_active(handle);
}
/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
int sockfd;
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
if (sockfd != -1)
goto out;
if (errno != EINVAL)
goto out;
#endif
sockfd = socket(domain, type, protocol);
if (sockfd == -1)
goto out;
if (uv__nonblock(sockfd, 1) || uv__cloexec(sockfd, 1)) {
close(sockfd);
sockfd = -1;
}
#if defined(SO_NOSIGPIPE)
{
int on = 1;
setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
}
#endif
out:
return sockfd;
}
int uv__accept(int sockfd) {
int peerfd;
assert(sockfd >= 0);
while (1) {
#if defined(__linux__)
static int no_accept4;
if (no_accept4)
goto skip;
peerfd = uv__accept4(sockfd,
NULL,
NULL,
UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
if (peerfd != -1)
break;
if (errno == EINTR)
continue;
if (errno != ENOSYS)
break;
no_accept4 = 1;
skip:
#endif
peerfd = accept(sockfd, NULL, NULL);
if (peerfd == -1) {
if (errno == EINTR)
continue;
else
break;
}
if (uv__cloexec(peerfd, 1) || uv__nonblock(peerfd, 1)) {
close(peerfd);
peerfd = -1;
}
break;
}
return peerfd;
}
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
int uv__nonblock(int fd, int set) {
int r;
do
r = ioctl(fd, FIONBIO, &set);
while (r == -1 && errno == EINTR);
return r;
}
int uv__cloexec(int fd, int set) {
int r;
do
r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
while (r == -1 && errno == EINTR);
return r;
}
#else /* !(defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) */
int uv__nonblock(int fd, int set) {
int flags;
int r;
do
r = fcntl(fd, F_GETFL);
while (r == -1 && errno == EINTR);
if (r == -1)
return -1;
/* Bail out now if already set/clear. */
if (!!(r & O_NONBLOCK) == !!set)
return 0;
if (set)
flags = r | O_NONBLOCK;
else
flags = r & ~O_NONBLOCK;
do
r = fcntl(fd, F_SETFL, flags);
while (r == -1 && errno == EINTR);
return r;
}
int uv__cloexec(int fd, int set) {
int flags;
int r;
do
r = fcntl(fd, F_GETFD);
while (r == -1 && errno == EINTR);
if (r == -1)
return -1;
/* Bail out now if already set/clear. */
if (!!(r & FD_CLOEXEC) == !!set)
return 0;
if (set)
flags = r | FD_CLOEXEC;
else
flags = r & ~FD_CLOEXEC;
do
r = fcntl(fd, F_SETFD, flags);
while (r == -1 && errno == EINTR);
return r;
}
#endif /* defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) */
/* This function is not execve-safe, there is a race window
* between the call to dup() and fcntl(FD_CLOEXEC).
*/
int uv__dup(int fd) {
fd = dup(fd);
if (fd == -1)
return -1;
if (uv__cloexec(fd, 1)) {
SAVE_ERRNO(close(fd));
return -1;
}
return fd;
}
uv_err_t uv_cwd(char* buffer, size_t size) {
if (!buffer || !size) {
return uv__new_artificial_error(UV_EINVAL);
}
if (getcwd(buffer, size)) {
return uv_ok_;
} else {
return uv__new_sys_error(errno);
}
}
uv_err_t uv_chdir(const char* dir) {
if (chdir(dir) == 0) {
return uv_ok_;
} else {
return uv__new_sys_error(errno);
}
}
void uv_disable_stdio_inheritance(void) {
int fd;
/* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
* first 16 file descriptors. After that, bail out after the first error.
*/
for (fd = 0; ; fd++)
if (uv__cloexec(fd, 1) && fd > 15)
break;
}
static void uv__run_pending(uv_loop_t* loop) {
ngx_queue_t* q;
uv__io_t* w;
while (!ngx_queue_empty(&loop->pending_queue)) {
q = ngx_queue_head(&loop->pending_queue);
ngx_queue_remove(q);
ngx_queue_init(q);
w = ngx_queue_data(q, uv__io_t, pending_queue);
w->cb(loop, w, UV__POLLOUT);
}
}
static unsigned int next_power_of_two(unsigned int val) {
val -= 1;
val |= val >> 1;
val |= val >> 2;
val |= val >> 4;
val |= val >> 8;
val |= val >> 16;
val += 1;
return val;
}
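/* Worked example of the bit smearing above: for val = 37, "val -= 1" gives
 * 36 and the shifts OR in every lower-order bit, producing 63; adding 1
 * yields 64, the next power of two. For an exact power of two such as 64,
 * the initial decrement keeps the result at 64 instead of doubling it.
 */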
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
uv__io_t** watchers;
void* fake_watcher_list;
void* fake_watcher_count;
unsigned int nwatchers;
unsigned int i;
if (len <= loop->nwatchers)
return;
/* Preserve fake watcher list and count at the end of the watchers */
if (loop->watchers != NULL) {
fake_watcher_list = loop->watchers[loop->nwatchers];
fake_watcher_count = loop->watchers[loop->nwatchers + 1];
} else {
fake_watcher_list = NULL;
fake_watcher_count = NULL;
}
nwatchers = next_power_of_two(len + 2) - 2;
watchers = realloc(loop->watchers,
(nwatchers + 2) * sizeof(loop->watchers[0]));
if (watchers == NULL)
abort();
for (i = loop->nwatchers; i < nwatchers; i++)
watchers[i] = NULL;
watchers[nwatchers] = fake_watcher_list;
watchers[nwatchers + 1] = fake_watcher_count;
loop->watchers = watchers;
loop->nwatchers = nwatchers;
}
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
assert(cb != NULL);
assert(fd >= -1);
ngx_queue_init(&w->pending_queue);
ngx_queue_init(&w->watcher_queue);
w->cb = cb;
w->fd = fd;
w->events = 0;
w->pevents = 0;
#if defined(UV_HAVE_KQUEUE)
w->rcount = 0;
w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
assert(0 != events);
assert(w->fd >= 0);
assert(w->fd < INT_MAX);
w->pevents |= events;
maybe_resize(loop, w->fd + 1);
#if !defined(__sun)
/* The event ports backend needs to rearm all file descriptors on each and
* every tick of the event loop but the other backends allow us to
* short-circuit here if the event mask is unchanged.
*/
if (w->events == w->pevents) {
if (w->events == 0 && !ngx_queue_empty(&w->watcher_queue)) {
ngx_queue_remove(&w->watcher_queue);
ngx_queue_init(&w->watcher_queue);
}
return;
}
#endif
if (ngx_queue_empty(&w->watcher_queue))
ngx_queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
if (loop->watchers[w->fd] == NULL) {
loop->watchers[w->fd] = w;
loop->nfds++;
}
}
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
assert(0 != events);
if (w->fd == -1)
return;
assert(w->fd >= 0);
/* Happens when uv__io_stop() is called on a handle that was never started. */
if ((unsigned) w->fd >= loop->nwatchers)
return;
w->pevents &= ~events;
if (w->pevents == 0) {
ngx_queue_remove(&w->watcher_queue);
ngx_queue_init(&w->watcher_queue);
if (loop->watchers[w->fd] != NULL) {
assert(loop->watchers[w->fd] == w);
assert(loop->nfds > 0);
loop->watchers[w->fd] = NULL;
loop->nfds--;
w->events = 0;
}
}
else if (ngx_queue_empty(&w->watcher_queue))
ngx_queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
ngx_queue_remove(&w->pending_queue);
/* Remove stale events for this file descriptor */
uv__platform_invalidate_fd(loop, w->fd);
}
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
if (ngx_queue_empty(&w->pending_queue))
ngx_queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}
int uv__io_active(const uv__io_t* w, unsigned int events) {
assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
assert(0 != events);
return 0 != (w->pevents & events);
}
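/* A minimal sketch (not part of the original file) of the internal watcher
 * life cycle defined above: initialize a uv__io_t for a file descriptor,
 * register interest in readability, and later remove it from the loop.
 * example_io_cb() is a hypothetical callback.
 */
static void example_io_cb(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  /* Invoked from the poll backend (or uv__run_pending()) with the ready
   * UV__POLLIN / UV__POLLOUT bits set in events. */
}

static void example_watch_fd(uv_loop_t* loop, uv__io_t* w, int fd) {
  uv__io_init(w, example_io_cb, fd);
  uv__io_start(loop, w, UV__POLLIN);
  /* ... handle I/O in example_io_cb() ... */
  uv__io_stop(loop, w, UV__POLLIN);
}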

View File

@ -1,88 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>
#include <unistd.h>
#include <time.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
void uv_loadavg(double avg[3]) {
/* Unsupported as of cygwin 1.7.7 */
avg[0] = avg[1] = avg[2] = 0;
}
int uv_exepath(char* buffer, size_t* size) {
  ssize_t n;
  if (!buffer || !size) {
    return -1;
  }
  /* Keep readlink()'s result signed so a -1 error return is detected instead
   * of being stored into the unsigned *size. */
  n = readlink("/proc/self/exe", buffer, *size - 1);
  if (n <= 0) return -1;
  buffer[n] = '\0';
  *size = n;
  return 0;
}
uint64_t uv_get_free_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
uint64_t uv_get_total_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
int uv_fs_event_init(uv_loop_t* loop,
uv_fs_event_t* handle,
const char* filename,
uv_fs_event_cb cb,
int flags) {
uv__set_sys_error(loop, ENOSYS);
return -1;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
assert(0 && "implement me");
}

View File

@ -1,123 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <TargetConditionals.h>
#if !TARGET_OS_IPHONE
# include <CoreFoundation/CoreFoundation.h>
# include <ApplicationServices/ApplicationServices.h>
#endif
int uv__set_process_title(const char* title) {
#if TARGET_OS_IPHONE
return -1;
#else
typedef CFTypeRef (*LSGetCurrentApplicationASNType)(void);
typedef OSStatus (*LSSetApplicationInformationItemType)(int,
CFTypeRef,
CFStringRef,
CFStringRef,
CFDictionaryRef*);
typedef CFDictionaryRef (*LSApplicationCheckInType)(int, CFDictionaryRef);
typedef OSStatus (*SetApplicationIsDaemonType)(int);
typedef void (*LSSetApplicationLaunchServicesServerConnectionStatusType)(
uint64_t, void*);
CFBundleRef launch_services_bundle;
LSGetCurrentApplicationASNType ls_get_current_application_asn;
LSSetApplicationInformationItemType ls_set_application_information_item;
CFStringRef* display_name_key;
CFTypeRef asn;
CFStringRef display_name;
OSStatus err;
CFBundleRef hi_services_bundle;
LSApplicationCheckInType ls_application_check_in;
SetApplicationIsDaemonType set_application_is_daemon;
LSSetApplicationLaunchServicesServerConnectionStatusType
ls_set_application_launch_services_server_connection_status;
launch_services_bundle =
CFBundleGetBundleWithIdentifier(CFSTR("com.apple.LaunchServices"));
if (launch_services_bundle == NULL)
return -1;
ls_get_current_application_asn = (LSGetCurrentApplicationASNType)
CFBundleGetFunctionPointerForName(launch_services_bundle,
CFSTR("_LSGetCurrentApplicationASN"));
if (ls_get_current_application_asn == NULL)
return -1;
ls_set_application_information_item = (LSSetApplicationInformationItemType)
CFBundleGetFunctionPointerForName(launch_services_bundle,
CFSTR("_LSSetApplicationInformationItem"));
if (ls_set_application_information_item == NULL)
return -1;
display_name_key = CFBundleGetDataPointerForName(launch_services_bundle,
CFSTR("_kLSDisplayNameKey"));
if (display_name_key == NULL || *display_name_key == NULL)
return -1;
/* Black 10.9 magic, to remove (Not responding) mark in Activity Monitor */
hi_services_bundle =
CFBundleGetBundleWithIdentifier(CFSTR("com.apple.HIServices"));
if (hi_services_bundle == NULL)
return -1;
set_application_is_daemon = CFBundleGetFunctionPointerForName(
hi_services_bundle,
CFSTR("SetApplicationIsDaemon"));
ls_application_check_in = CFBundleGetFunctionPointerForName(
launch_services_bundle,
CFSTR("_LSApplicationCheckIn"));
ls_set_application_launch_services_server_connection_status =
CFBundleGetFunctionPointerForName(
launch_services_bundle,
CFSTR("_LSSetApplicationLaunchServicesServerConnectionStatus"));
if (set_application_is_daemon == NULL ||
ls_application_check_in == NULL ||
ls_set_application_launch_services_server_connection_status == NULL) {
return -1;
}
if (set_application_is_daemon(1) != noErr)
return -1;
ls_set_application_launch_services_server_connection_status(0, NULL);
/* Check into process manager?! */
ls_application_check_in(-2,
CFBundleGetInfoDictionary(CFBundleGetMainBundle()));
display_name = CFStringCreateWithCString(NULL, title, kCFStringEncodingUTF8);
asn = ls_get_current_application_asn();
err = ls_set_application_information_item(-2, /* Magic value. */
asn,
*display_name_key,
display_name,
NULL);
return (err == noErr) ? 0 : -1;
#endif /* !TARGET_OS_IPHONE */
}

View File

@ -1,437 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdint.h>
#include <errno.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <CoreFoundation/CFRunLoop.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach-o/dyld.h> /* _NSGetExecutablePath */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
/* Forward declarations */
static void uv__cf_loop_runner(void* arg);
static void uv__cf_loop_cb(void* arg);
typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
struct uv__cf_loop_signal_s {
void* arg;
cf_loop_signal_cb cb;
ngx_queue_t member;
};
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
CFRunLoopSourceContext ctx;
int r;
if (uv__kqueue_init(loop))
return -1;
loop->cf_loop = NULL;
if ((r = uv_mutex_init(&loop->cf_mutex)))
return r;
if ((r = uv_sem_init(&loop->cf_sem, 0)))
return r;
ngx_queue_init(&loop->cf_signals);
memset(&ctx, 0, sizeof(ctx));
ctx.info = loop;
ctx.perform = uv__cf_loop_cb;
loop->cf_cb = CFRunLoopSourceCreate(NULL, 0, &ctx);
if ((r = uv_thread_create(&loop->cf_thread, uv__cf_loop_runner, loop)))
return r;
/* Synchronize threads */
uv_sem_wait(&loop->cf_sem);
assert(ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) != NULL);
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
ngx_queue_t* item;
uv__cf_loop_signal_t* s;
assert(loop->cf_loop != NULL);
uv__cf_loop_signal(loop, NULL, NULL);
uv_thread_join(&loop->cf_thread);
uv_sem_destroy(&loop->cf_sem);
uv_mutex_destroy(&loop->cf_mutex);
/* Free any remaining data */
while (!ngx_queue_empty(&loop->cf_signals)) {
item = ngx_queue_head(&loop->cf_signals);
s = ngx_queue_data(item, uv__cf_loop_signal_t, member);
ngx_queue_remove(item);
free(s);
}
}
static void uv__cf_loop_runner(void* arg) {
uv_loop_t* loop;
loop = arg;
/* Get thread's loop */
ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) = CFRunLoopGetCurrent();
CFRunLoopAddSource(loop->cf_loop,
loop->cf_cb,
kCFRunLoopDefaultMode);
uv_sem_post(&loop->cf_sem);
CFRunLoopRun();
CFRunLoopRemoveSource(loop->cf_loop,
loop->cf_cb,
kCFRunLoopDefaultMode);
}
static void uv__cf_loop_cb(void* arg) {
uv_loop_t* loop;
ngx_queue_t* item;
ngx_queue_t split_head;
uv__cf_loop_signal_t* s;
loop = arg;
uv_mutex_lock(&loop->cf_mutex);
ngx_queue_init(&split_head);
if (!ngx_queue_empty(&loop->cf_signals)) {
ngx_queue_t* split_pos = ngx_queue_next(&loop->cf_signals);
ngx_queue_split(&loop->cf_signals, split_pos, &split_head);
}
uv_mutex_unlock(&loop->cf_mutex);
while (!ngx_queue_empty(&split_head)) {
item = ngx_queue_head(&split_head);
s = ngx_queue_data(item, uv__cf_loop_signal_t, member);
/* This was a termination signal */
if (s->cb == NULL)
CFRunLoopStop(loop->cf_loop);
else
s->cb(s->arg);
ngx_queue_remove(item);
free(s);
}
}
void uv__cf_loop_signal(uv_loop_t* loop, cf_loop_signal_cb cb, void* arg) {
uv__cf_loop_signal_t* item;
item = malloc(sizeof(*item));
/* XXX: Fail */
if (item == NULL)
abort();
item->arg = arg;
item->cb = cb;
uv_mutex_lock(&loop->cf_mutex);
ngx_queue_insert_tail(&loop->cf_signals, &item->member);
uv_mutex_unlock(&loop->cf_mutex);
assert(loop->cf_loop != NULL);
CFRunLoopSourceSignal(loop->cf_cb);
CFRunLoopWakeUp(loop->cf_loop);
}
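/* A minimal sketch (not part of the original file): uv__cf_loop_signal() can
 * be used from the event-loop thread to run a callback on the CFRunLoop
 * thread started in uv__platform_loop_init(). example_on_cf_thread() is a
 * hypothetical callback.
 */
static void example_on_cf_thread(void* arg) {
  /* Executes inside uv__cf_loop_cb() on the CFRunLoop thread. */
}

static void example_signal_cf_thread(uv_loop_t* loop) {
  uv__cf_loop_signal(loop, example_on_cf_thread, NULL);
}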
uint64_t uv__hrtime(void) {
mach_timebase_info_data_t info;
if (mach_timebase_info(&info) != KERN_SUCCESS)
abort();
return mach_absolute_time() * info.numer / info.denom;
}
int uv_exepath(char* buffer, size_t* size) {
uint32_t usize;
int result;
char* path;
char* fullpath;
if (!buffer || !size) {
return -1;
}
usize = *size;
result = _NSGetExecutablePath(buffer, &usize);
if (result) return result;
path = (char*)malloc(2 * PATH_MAX);
fullpath = realpath(buffer, path);
if (fullpath == NULL) {
free(path);
return -1;
}
strncpy(buffer, fullpath, *size);
free(fullpath);
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
vm_statistics_data_t info;
mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t);
if (host_statistics(mach_host_self(), HOST_VM_INFO,
(host_info_t)&info, &count) != KERN_SUCCESS) {
return -1;
}
return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
uint64_t info;
int which[] = {CTL_HW, HW_MEMSIZE};
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return -1;
}
return (uint64_t) info;
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
uv_err_t uv_resident_set_memory(size_t* rss) {
mach_msg_type_number_t count;
task_basic_info_data_t info;
kern_return_t err;
count = TASK_BASIC_INFO_COUNT;
err = task_info(mach_task_self(),
TASK_BASIC_INFO,
(task_info_t) &info,
&count);
(void) &err;
/* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
* KERN_SUCCESS implies a libuv bug.
*/
assert(err == KERN_SUCCESS);
*rss = info.resident_size;
return uv_ok_;
}
uv_err_t uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks);
char model[512];
uint64_t cpuspeed;
size_t size;
unsigned int i;
natural_t numcpus;
mach_msg_type_number_t msg_type;
processor_cpu_load_info_data_t *info;
uv_cpu_info_t* cpu_info;
size = sizeof(model);
if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) < 0 &&
sysctlbyname("hw.model", &model, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
size = sizeof(cpuspeed);
if (sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
(processor_info_array_t*)&info,
&msg_type) != KERN_SUCCESS) {
return uv__new_sys_error(errno);
}
*cpu_infos = (uv_cpu_info_t*)malloc(numcpus * sizeof(uv_cpu_info_t));
if (!(*cpu_infos)) {
return uv__new_artificial_error(UV_ENOMEM);
}
*count = numcpus;
for (i = 0; i < numcpus; i++) {
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier;
cpu_info->cpu_times.irq = 0;
cpu_info->model = strdup(model);
cpu_info->speed = cpuspeed/1000000;
}
vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
return uv_ok_;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
struct ifaddrs *addrs, *ent;
char ip[INET6_ADDRSTRLEN];
uv_interface_address_t* address;
if (getifaddrs(&addrs) != 0) {
return uv__new_sys_error(errno);
}
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == AF_LINK)) {
continue;
}
(*count)++;
}
*addresses = (uv_interface_address_t*)
malloc(*count * sizeof(uv_interface_address_t));
if (!(*addresses)) {
return uv__new_artificial_error(UV_ENOMEM);
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
bzero(&ip, sizeof (ip));
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING)) {
continue;
}
if (ent->ifa_addr == NULL) {
continue;
}
/*
* On Mac OS X getifaddrs returns information related to Mac Addresses for
* various devices, such as firewire, etc. These are not relevant here.
*/
if (ent->ifa_addr->sa_family == AF_LINK) {
continue;
}
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6 *)ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in *)ent->ifa_addr);
}
address->is_internal = ent->ifa_flags & IFF_LOOPBACK ? 1 : 0;
address++;
}
freeifaddrs(addrs);
return uv_ok_;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}

View File

@ -1,83 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <dlfcn.h>
#include <errno.h>
#include <string.h>
#include <locale.h>
static int uv__dlerror(uv_lib_t* lib);
int uv_dlopen(const char* filename, uv_lib_t* lib) {
dlerror(); /* Reset error status. */
lib->errmsg = NULL;
lib->handle = dlopen(filename, RTLD_LAZY);
return lib->handle ? 0 : uv__dlerror(lib);
}
void uv_dlclose(uv_lib_t* lib) {
if (lib->errmsg) {
free(lib->errmsg);
lib->errmsg = NULL;
}
if (lib->handle) {
/* Ignore errors. No good way to signal them without leaking memory. */
dlclose(lib->handle);
lib->handle = NULL;
}
}
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
dlerror(); /* Reset error status. */
*ptr = dlsym(lib->handle, name);
return uv__dlerror(lib);
}
const char* uv_dlerror(uv_lib_t* lib) {
return lib->errmsg ? lib->errmsg : "no error";
}
static int uv__dlerror(uv_lib_t* lib) {
char* errmsg;
if (lib->errmsg)
free(lib->errmsg);
errmsg = dlerror();
if (errmsg) {
lib->errmsg = strdup(errmsg);
return -1;
}
else {
lib->errmsg = NULL;
return 0;
}
}
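/* A minimal usage sketch (not part of the original file): load a shared
 * library, resolve a symbol and close the handle again. "libexample.so" and
 * example_fn are hypothetical.
 */
static void example_dlopen(void) {
  uv_lib_t lib;
  void (*example_fn)(void);

  if (uv_dlopen("libexample.so", &lib) != 0) {
    /* uv_dlerror(&lib) describes the failure; uv_dlclose() frees it. */
    uv_dlclose(&lib);
    return;
  }

  if (uv_dlsym(&lib, "example_fn", (void**) &example_fn) == 0)
    example_fn();

  uv_dlclose(&lib);
}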

View File

@ -1,110 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* TODO Share this code with Windows.
* See https://github.com/joyent/libuv/issues/76
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/* TODO Expose callback to user to handle fatal error like V8 does. */
void uv_fatal_error(const int errorno, const char* syscall) {
char* buf = NULL;
const char* errmsg;
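/* Note: buf is never assigned below, so the branch that follows always
 * falls through to "Unknown error"; presumably a leftover from an earlier
 * strerror_r() based lookup. */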
if (buf) {
errmsg = buf;
} else {
errmsg = "Unknown error";
}
if (syscall) {
fprintf(stderr, "\nlibuv fatal error. %s: (%d) %s\n", syscall, errorno,
errmsg);
} else {
fprintf(stderr, "\nlibuv fatal error. (%d) %s\n", errorno, errmsg);
}
abort();
}
uv_err_code uv_translate_sys_error(int sys_errno) {
switch (sys_errno) {
case 0: return UV_OK;
case EIO: return UV_EIO;
case EPERM: return UV_EPERM;
case ENOSYS: return UV_ENOSYS;
case ENOTSOCK: return UV_ENOTSOCK;
case ENOENT: return UV_ENOENT;
case EACCES: return UV_EACCES;
case EAFNOSUPPORT: return UV_EAFNOSUPPORT;
case EBADF: return UV_EBADF;
case EPIPE: return UV_EPIPE;
case ESPIPE: return UV_ESPIPE;
case EAGAIN: return UV_EAGAIN;
#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK: return UV_EAGAIN;
#endif
case ECONNRESET: return UV_ECONNRESET;
case EFAULT: return UV_EFAULT;
case EMFILE: return UV_EMFILE;
case EMSGSIZE: return UV_EMSGSIZE;
case ENAMETOOLONG: return UV_ENAMETOOLONG;
case EINVAL: return UV_EINVAL;
case ENETDOWN: return UV_ENETDOWN;
case ENETUNREACH: return UV_ENETUNREACH;
case ECONNABORTED: return UV_ECONNABORTED;
case ELOOP: return UV_ELOOP;
case ECONNREFUSED: return UV_ECONNREFUSED;
case EADDRINUSE: return UV_EADDRINUSE;
case EADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
case ENOTDIR: return UV_ENOTDIR;
case EISDIR: return UV_EISDIR;
case ENODEV: return UV_ENODEV;
case ENOTCONN: return UV_ENOTCONN;
case EEXIST: return UV_EEXIST;
case EHOSTUNREACH: return UV_EHOSTUNREACH;
case EAI_NONAME: return UV_ENOENT;
case ESRCH: return UV_ESRCH;
case ETIMEDOUT: return UV_ETIMEDOUT;
case EXDEV: return UV_EXDEV;
case EBUSY: return UV_EBUSY;
#if ENOTEMPTY != EEXIST
case ENOTEMPTY: return UV_ENOTEMPTY;
#endif
case ENOSPC: return UV_ENOSPC;
case EROFS: return UV_EROFS;
case ENOMEM: return UV_ENOMEM;
case EDQUOT: return UV_ENOSPC;
default: return UV_UNKNOWN;
}
UNREACHABLE();
}
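A trivial sketch of the mapping in use; uv_translate_sys_error() is an internal helper declared in internal.h, so this assumes an in-tree compilation unit.
#include <errno.h>
#include <stdio.h>
#include "uv.h"
#include "internal.h"
int main(void) {
  /* EPIPE maps to UV_EPIPE, EWOULDBLOCK collapses onto UV_EAGAIN, and an
   * unlisted value falls through to UV_UNKNOWN. */
  printf("%d %d %d\n",
         uv_translate_sys_error(EPIPE),
         uv_translate_sys_error(EWOULDBLOCK),
         uv_translate_sys_error(12345));
  return 0;
}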

View File

@ -1,343 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <kvm.h>
#include <paths.h>
#include <sys/user.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h> /* VM_LOADAVG */
#include <time.h>
#include <stdlib.h>
#include <unistd.h> /* sysconf */
#include <fcntl.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
#ifndef CPUSTATES
# define CPUSTATES 5U
#endif
#ifndef CP_USER
# define CP_USER 0
# define CP_NICE 1
# define CP_SYS 2
# define CP_IDLE 3
# define CP_INTR 4
#endif
static char *process_title;
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return uv__kqueue_init(loop);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
int uv_exepath(char* buffer, size_t* size) {
int mib[4];
size_t cb;
if (!buffer || !size) {
return -1;
}
#ifdef __DragonFly__
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_ARGS;
mib[3] = getpid();
#else
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_PATHNAME;
mib[3] = -1;
#endif
cb = *size;
if (sysctl(mib, 4, buffer, &cb, NULL, 0) < 0) {
*size = 0;
return -1;
}
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
int freecount;
size_t size = sizeof(freecount);
if(sysctlbyname("vm.stats.vm.v_free_count",
&freecount, &size, NULL, 0) == -1){
return -1;
}
return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
unsigned long info;
int which[] = {CTL_HW, HW_PHYSMEM};
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return -1;
}
return (uint64_t) info;
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
char** uv_setup_args(int argc, char** argv) {
process_title = argc ? strdup(argv[0]) : NULL;
return argv;
}
uv_err_t uv_set_process_title(const char* title) {
int oid[4];
if (process_title) free(process_title);
process_title = strdup(title);
oid[0] = CTL_KERN;
oid[1] = KERN_PROC;
oid[2] = KERN_PROC_ARGS;
oid[3] = getpid();
sysctl(oid,
ARRAY_SIZE(oid),
NULL,
NULL,
process_title,
strlen(process_title) + 1);
return uv_ok_;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
if (process_title) {
strncpy(buffer, process_title, size);
} else {
if (size > 0) {
buffer[0] = '\0';
}
}
return uv_ok_;
}
uv_err_t uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc *kinfo = NULL;
pid_t pid;
int nprocs;
size_t page_size = getpagesize();
pid = getpid();
kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open");
if (kd == NULL) goto error;
kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, &nprocs);
if (kinfo == NULL) goto error;
#ifdef __DragonFly__
*rss = kinfo->kp_vm_rssize * page_size;
#else
*rss = kinfo->ki_rssize * page_size;
#endif
kvm_close(kd);
return uv_ok_;
error:
if (kd) kvm_close(kd);
return uv__new_sys_error(errno);
}
uv_err_t uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus,
cur = 0;
uv_cpu_info_t* cpu_info;
const char* maxcpus_key;
const char* cptimes_key;
char model[512];
long* cp_times;
int numcpus;
size_t size;
int i;
#if defined(__DragonFly__)
/* This is not quite correct but DragonFlyBSD doesn't seem to have anything
* comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total,
* not per CPU). At least this stops uv_cpu_info() from failing completely.
*/
maxcpus_key = "hw.ncpu";
cptimes_key = "kern.cp_time";
#else
maxcpus_key = "kern.smp.maxcpus";
cptimes_key = "kern.cp_times";
#endif
size = sizeof(model);
if (sysctlbyname("hw.model", &model, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
size = sizeof(numcpus);
if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
*cpu_infos = (uv_cpu_info_t*)malloc(numcpus * sizeof(uv_cpu_info_t));
if (!(*cpu_infos)) {
return uv__new_artificial_error(UV_ENOMEM);
}
*count = numcpus;
size = sizeof(cpuspeed);
if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0) < 0) {
free(*cpu_infos);
return uv__new_sys_error(errno);
}
/* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of ncpu */
size = sizeof(maxcpus);
if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0) < 0) {
free(*cpu_infos);
return uv__new_sys_error(errno);
}
size = maxcpus * CPUSTATES * sizeof(long);
cp_times = malloc(size);
if (cp_times == NULL) {
free(*cpu_infos);
return uv__new_sys_error(ENOMEM);
}
if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0) < 0) {
free(cp_times);
free(*cpu_infos);
return uv__new_sys_error(errno);
}
for (i = 0; i < numcpus; i++) {
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
cpu_info->model = strdup(model);
cpu_info->speed = cpuspeed;
cur+=CPUSTATES;
}
free(cp_times);
return uv_ok_;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
/* TODO: implement */
*addresses = NULL;
*count = 0;
return uv_ok_;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
}
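A consumer-side sketch for the uv_cpu_info()/uv_free_cpu_info() pair above, assuming the uv_err_t-returning 0.10-era signatures; only fields populated in the loop above are touched.
#include <stdio.h>
#include "uv.h"
int main(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;
  if (uv_cpu_info(&cpus, &count).code != UV_OK)
    return 1;
  /* model comes from hw.model, speed from hw.clockrate (MHz). */
  for (i = 0; i < count; i++)
    printf("%s @ %d MHz\n", cpus[i].model, cpus[i].speed);
  uv_free_cpu_info(cpus, count);
  return 0;
}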

View File

@ -1,875 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <pthread.h>
#include <dirent.h>
#include <unistd.h>
#include <fcntl.h>
#include <utime.h>
#include <poll.h>
#if defined(__linux__) || defined(__sun)
# include <sys/sendfile.h>
#elif defined(__APPLE__) || defined(__FreeBSD__)
# include <sys/socket.h>
# include <sys/uio.h>
#endif
#define INIT(type) \
do { \
uv__req_init((loop), (req), UV_FS); \
(req)->fs_type = UV_FS_ ## type; \
(req)->errorno = 0; \
(req)->result = 0; \
(req)->ptr = NULL; \
(req)->loop = loop; \
(req)->path = NULL; \
(req)->new_path = NULL; \
(req)->cb = (cb); \
} \
while (0)
#define PATH \
do { \
if (NULL == ((req)->path = strdup((path)))) \
return uv__set_sys_error((loop), ENOMEM); \
} \
while (0)
#define PATH2 \
do { \
size_t path_len; \
size_t new_path_len; \
\
path_len = strlen((path)) + 1; \
new_path_len = strlen((new_path)) + 1; \
\
if (NULL == ((req)->path = malloc(path_len + new_path_len))) \
return uv__set_sys_error((loop), ENOMEM); \
\
(req)->new_path = (req)->path + path_len; \
memcpy((void*) (req)->path, (path), path_len); \
memcpy((void*) (req)->new_path, (new_path), new_path_len); \
} \
while (0)
#define POST \
do { \
if ((cb) != NULL) { \
uv__work_submit((loop), &(req)->work_req, uv__fs_work, uv__fs_done); \
return 0; \
} \
else { \
uv__fs_work(&(req)->work_req); \
uv__fs_done(&(req)->work_req, 0); \
return (req)->result; \
} \
} \
while (0)
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
return fdatasync(req->file);
#elif defined(__APPLE__) && defined(F_FULLFSYNC)
return fcntl(req->file, F_FULLFSYNC);
#else
return fsync(req->file);
#endif
}
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)
/* utimesat() has nanosecond resolution but we stick to microseconds
* for the sake of consistency with other platforms.
*/
static int no_utimesat;
struct timespec ts[2];
struct timeval tv[2];
char path[sizeof("/proc/self/fd/") + 3 * sizeof(int)];
int r;
if (no_utimesat)
goto skip;
ts[0].tv_sec = req->atime;
ts[0].tv_nsec = (unsigned long)(req->atime * 1000000) % 1000000 * 1000;
ts[1].tv_sec = req->mtime;
ts[1].tv_nsec = (unsigned long)(req->mtime * 1000000) % 1000000 * 1000;
r = uv__utimesat(req->file, NULL, ts, 0);
if (r == 0)
return r;
if (errno != ENOSYS)
return r;
no_utimesat = 1;
skip:
tv[0].tv_sec = req->atime;
tv[0].tv_usec = (unsigned long)(req->atime * 1000000) % 1000000;
tv[1].tv_sec = req->mtime;
tv[1].tv_usec = (unsigned long)(req->mtime * 1000000) % 1000000;
snprintf(path, sizeof(path), "/proc/self/fd/%d", (int) req->file);
r = utimes(path, tv);
if (r == 0)
return r;
switch (errno) {
case ENOENT:
if (fcntl(req->file, F_GETFL) == -1 && errno == EBADF)
break;
/* Fall through. */
case EACCES:
case ENOTDIR:
errno = ENOSYS;
break;
}
return r;
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
|| defined(__sun)
struct timeval tv[2];
tv[0].tv_sec = req->atime;
tv[0].tv_usec = (unsigned long)(req->atime * 1000000) % 1000000;
tv[1].tv_sec = req->mtime;
tv[1].tv_usec = (unsigned long)(req->mtime * 1000000) % 1000000;
return futimes(req->file, tv);
#else
errno = ENOSYS;
return -1;
#endif
}
static ssize_t uv__fs_read(uv_fs_t* req) {
if (req->off < 0)
return read(req->file, req->buf, req->len);
else
return pread(req->file, req->buf, req->len, req->off);
}
static int uv__fs_readdir_filter(const struct dirent* dent) {
return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
}
/* This should have been called uv__fs_scandir(). */
static ssize_t uv__fs_readdir(uv_fs_t* req) {
struct dirent **dents;
int saved_errno;
size_t off;
size_t len;
char *buf;
int i;
int n;
dents = NULL;
n = scandir(req->path, &dents, uv__fs_readdir_filter, alphasort);
if (n == 0)
goto out; /* osx still needs to deallocate some memory */
else if (n == -1)
return n;
len = 0;
for (i = 0; i < n; i++)
len += strlen(dents[i]->d_name) + 1;
buf = malloc(len);
if (buf == NULL) {
errno = ENOMEM;
n = -1;
goto out;
}
off = 0;
for (i = 0; i < n; i++) {
len = strlen(dents[i]->d_name) + 1;
memcpy(buf + off, dents[i]->d_name, len);
off += len;
}
req->ptr = buf;
out:
saved_errno = errno;
if (dents != NULL) {
for (i = 0; i < n; i++)
free(dents[i]);
free(dents);
}
errno = saved_errno;
return n;
}
static ssize_t uv__fs_readlink(uv_fs_t* req) {
ssize_t len;
char* buf;
len = pathconf(req->path, _PC_PATH_MAX);
if (len == -1) {
#if defined(PATH_MAX)
len = PATH_MAX;
#else
len = 4096;
#endif
}
buf = malloc(len + 1);
if (buf == NULL) {
errno = ENOMEM;
return -1;
}
len = readlink(req->path, buf, len);
if (len == -1) {
free(buf);
return -1;
}
buf[len] = '\0';
req->ptr = buf;
return 0;
}
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
struct pollfd pfd;
int use_pread;
off_t offset;
ssize_t nsent;
ssize_t nread;
ssize_t nwritten;
size_t buflen;
size_t len;
ssize_t n;
int in_fd;
int out_fd;
char buf[8192];
len = req->len;
in_fd = req->flags;
out_fd = req->file;
offset = req->off;
use_pread = 1;
/* Here are the rules regarding errors:
*
* 1. Read errors are reported only if nsent==0, otherwise we return nsent.
* The user needs to know that some data has already been sent, to stop
* them from sending it twice.
*
* 2. Write errors are always reported. Write errors are bad because they
* mean data loss: we've read data but now we can't write it out.
*
* We try to use pread() and fall back to regular read() if the source fd
* doesn't support positional reads, for example when it's a pipe fd.
*
* If we get EAGAIN when writing to the target fd, we poll() on it until
* it becomes writable again.
*
* FIXME: If we get a write error when use_pread==1, it should be safe to
* return the number of sent bytes instead of an error because pread()
* is, in theory, idempotent. However, special files in /dev or /proc
* may support pread() but not necessarily return the same data on
* successive reads.
*
* FIXME: There is no way now to signal that we managed to send *some* data
* before a write error.
*/
for (nsent = 0; (size_t) nsent < len; ) {
buflen = len - nsent;
if (buflen > sizeof(buf))
buflen = sizeof(buf);
do
if (use_pread)
nread = pread(in_fd, buf, buflen, offset);
else
nread = read(in_fd, buf, buflen);
while (nread == -1 && errno == EINTR);
if (nread == 0)
goto out;
if (nread == -1) {
if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
use_pread = 0;
continue;
}
if (nsent == 0)
nsent = -1;
goto out;
}
for (nwritten = 0; nwritten < nread; ) {
do
n = write(out_fd, buf + nwritten, nread - nwritten);
while (n == -1 && errno == EINTR);
if (n != -1) {
nwritten += n;
continue;
}
if (errno != EAGAIN && errno != EWOULDBLOCK) {
nsent = -1;
goto out;
}
pfd.fd = out_fd;
pfd.events = POLLOUT;
pfd.revents = 0;
do
n = poll(&pfd, 1, -1);
while (n == -1 && errno == EINTR);
if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
errno = EIO;
nsent = -1;
goto out;
}
}
offset += nread;
nsent += nread;
}
out:
if (nsent != -1)
req->off = offset;
return nsent;
}
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
int in_fd;
int out_fd;
in_fd = req->flags;
out_fd = req->file;
#if defined(__linux__) || defined(__sun)
{
off_t off;
ssize_t r;
off = req->off;
r = sendfile(out_fd, in_fd, &off, req->len);
/* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
* it still writes out data. Fortunately, we can detect it by checking if
* the offset has been updated.
*/
if (r != -1 || off > req->off) {
r = off - req->off;
req->off = off;
return r;
}
if (errno == EINVAL ||
errno == EIO ||
errno == ENOTSOCK ||
errno == EXDEV) {
errno = 0;
return uv__fs_sendfile_emul(req);
}
return -1;
}
#elif defined(__FreeBSD__) || defined(__APPLE__)
{
off_t len;
ssize_t r;
/* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
* non-blocking mode and not all data could be written. If a non-zero
* number of bytes have been sent, we don't consider it an error.
*/
#if defined(__FreeBSD__)
len = 0;
r = sendfile(in_fd, out_fd, req->off, req->len, NULL, &len, 0);
#else
/* The darwin sendfile takes len as an input for the length to send,
* so make sure to initialize it with the caller's value. */
len = req->len;
r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif
if (r != -1 || len != 0) {
req->off += len;
return (ssize_t) len;
}
if (errno == EINVAL ||
errno == EIO ||
errno == ENOTSOCK ||
errno == EXDEV) {
errno = 0;
return uv__fs_sendfile_emul(req);
}
return -1;
}
#else
return uv__fs_sendfile_emul(req);
#endif
}
static ssize_t uv__fs_utime(uv_fs_t* req) {
struct utimbuf buf;
buf.actime = req->atime;
buf.modtime = req->mtime;
return utime(req->path, &buf); /* TODO use utimes() where available */
}
static ssize_t uv__fs_write(uv_fs_t* req) {
ssize_t r;
/* Serialize writes on OS X: concurrent write() and pwrite() calls result in
* data loss. We can't use a per-file descriptor lock, the descriptor may be
* a dup().
*/
#if defined(__APPLE__)
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&lock);
#endif
if (req->off < 0)
r = write(req->file, req->buf, req->len);
else
r = pwrite(req->file, req->buf, req->len, req->off);
#if defined(__APPLE__)
pthread_mutex_unlock(&lock);
#endif
return r;
}
static void uv__fs_work(struct uv__work* w) {
int retry_on_eintr;
uv_fs_t* req;
ssize_t r;
req = container_of(w, uv_fs_t, work_req);
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE);
do {
errno = 0;
#define X(type, action) \
case UV_FS_ ## type: \
r = action; \
break;
switch (req->fs_type) {
X(CHMOD, chmod(req->path, req->mode));
X(CHOWN, chown(req->path, req->uid, req->gid));
X(CLOSE, close(req->file));
X(FCHMOD, fchmod(req->file, req->mode));
X(FCHOWN, fchown(req->file, req->uid, req->gid));
X(FDATASYNC, uv__fs_fdatasync(req));
X(FSTAT, fstat(req->file, &req->statbuf));
X(FSYNC, fsync(req->file));
X(FTRUNCATE, ftruncate(req->file, req->off));
X(FUTIME, uv__fs_futime(req));
X(LSTAT, lstat(req->path, &req->statbuf));
X(LINK, link(req->path, req->new_path));
X(MKDIR, mkdir(req->path, req->mode));
X(OPEN, open(req->path, req->flags, req->mode));
X(READ, uv__fs_read(req));
X(READDIR, uv__fs_readdir(req));
X(READLINK, uv__fs_readlink(req));
X(RENAME, rename(req->path, req->new_path));
X(RMDIR, rmdir(req->path));
X(SENDFILE, uv__fs_sendfile(req));
X(STAT, stat(req->path, &req->statbuf));
X(SYMLINK, symlink(req->path, req->new_path));
X(UNLINK, unlink(req->path));
X(UTIME, uv__fs_utime(req));
X(WRITE, uv__fs_write(req));
default: abort();
}
#undef X
}
while (r == -1 && errno == EINTR && retry_on_eintr);
req->errorno = errno;
req->result = r;
if (r == 0 && (req->fs_type == UV_FS_STAT ||
req->fs_type == UV_FS_FSTAT ||
req->fs_type == UV_FS_LSTAT)) {
req->ptr = &req->statbuf;
}
}
static void uv__fs_done(struct uv__work* w, int status) {
uv_fs_t* req;
req = container_of(w, uv_fs_t, work_req);
uv__req_unregister(req->loop, req);
if (req->errorno != 0) {
req->errorno = uv_translate_sys_error(req->errorno);
uv__set_artificial_error(req->loop, req->errorno);
}
if (status == -UV_ECANCELED) {
assert(req->errorno == 0);
req->errorno = UV_ECANCELED;
uv__set_artificial_error(req->loop, UV_ECANCELED);
}
if (req->cb != NULL)
req->cb(req);
}
int uv_fs_chmod(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
int mode,
uv_fs_cb cb) {
INIT(CHMOD);
PATH;
req->mode = mode;
POST;
}
int uv_fs_chown(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
uv_uid_t uid,
uv_gid_t gid,
uv_fs_cb cb) {
INIT(CHOWN);
PATH;
req->uid = uid;
req->gid = gid;
POST;
}
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(CLOSE);
req->file = file;
POST;
}
int uv_fs_fchmod(uv_loop_t* loop,
uv_fs_t* req,
uv_file file,
int mode,
uv_fs_cb cb) {
INIT(FCHMOD);
req->file = file;
req->mode = mode;
POST;
}
int uv_fs_fchown(uv_loop_t* loop,
uv_fs_t* req,
uv_file file,
uv_uid_t uid,
uv_gid_t gid,
uv_fs_cb cb) {
INIT(FCHOWN);
req->file = file;
req->uid = uid;
req->gid = gid;
POST;
}
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FDATASYNC);
req->file = file;
POST;
}
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSTAT);
req->file = file;
POST;
}
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSYNC);
req->file = file;
POST;
}
int uv_fs_ftruncate(uv_loop_t* loop,
uv_fs_t* req,
uv_file file,
int64_t off,
uv_fs_cb cb) {
INIT(FTRUNCATE);
req->file = file;
req->off = off;
POST;
}
int uv_fs_futime(uv_loop_t* loop,
uv_fs_t* req,
uv_file file,
double atime,
double mtime,
uv_fs_cb cb) {
INIT(FUTIME);
req->file = file;
req->atime = atime;
req->mtime = mtime;
POST;
}
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(LSTAT);
PATH;
POST;
}
int uv_fs_link(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
const char* new_path,
uv_fs_cb cb) {
INIT(LINK);
PATH2;
POST;
}
int uv_fs_mkdir(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
int mode,
uv_fs_cb cb) {
INIT(MKDIR);
PATH;
req->mode = mode;
POST;
}
int uv_fs_open(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
int flags,
int mode,
uv_fs_cb cb) {
INIT(OPEN);
PATH;
req->flags = flags;
req->mode = mode;
POST;
}
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
uv_file file,
void* buf,
size_t len,
int64_t off,
uv_fs_cb cb) {
INIT(READ);
req->file = file;
req->buf = buf;
req->len = len;
req->off = off;
POST;
}
int uv_fs_readdir(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
int flags,
uv_fs_cb cb) {
INIT(READDIR);
PATH;
req->flags = flags;
POST;
}
int uv_fs_readlink(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
uv_fs_cb cb) {
INIT(READLINK);
PATH;
POST;
}
int uv_fs_rename(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
const char* new_path,
uv_fs_cb cb) {
INIT(RENAME);
PATH2;
POST;
}
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(RMDIR);
PATH;
POST;
}
int uv_fs_sendfile(uv_loop_t* loop,
uv_fs_t* req,
uv_file out_fd,
uv_file in_fd,
int64_t off,
size_t len,
uv_fs_cb cb) {
INIT(SENDFILE);
req->flags = in_fd; /* hack */
req->file = out_fd;
req->off = off;
req->len = len;
POST;
}
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(STAT);
PATH;
POST;
}
int uv_fs_symlink(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
const char* new_path,
int flags,
uv_fs_cb cb) {
INIT(SYMLINK);
PATH2;
req->flags = flags;
POST;
}
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(UNLINK);
PATH;
POST;
}
int uv_fs_utime(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
double atime,
double mtime,
uv_fs_cb cb) {
INIT(UTIME);
PATH;
req->atime = atime;
req->mtime = mtime;
POST;
}
int uv_fs_write(uv_loop_t* loop,
uv_fs_t* req,
uv_file file,
void* buf,
size_t len,
int64_t off,
uv_fs_cb cb) {
INIT(WRITE);
req->file = file;
req->buf = buf;
req->len = len;
req->off = off;
POST;
}
void uv_fs_req_cleanup(uv_fs_t* req) {
free((void*) req->path);
req->path = NULL;
req->new_path = NULL;
if (req->ptr != &req->statbuf)
free(req->ptr);
req->ptr = NULL;
}
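A minimal synchronous usage sketch for the request plumbing above: with cb == NULL the POST macro runs uv__fs_work() inline, so the result is available on return. "/tmp/example" is just a placeholder path.
#include <stdio.h>
#include <sys/stat.h>
#include "uv.h"
int main(void) {
  uv_fs_t req;
  if (uv_fs_stat(uv_default_loop(), &req, "/tmp/example", NULL) == 0) {
    /* For STAT-family requests uv__fs_work() points req.ptr at the stat buffer. */
    printf("size: %lld\n", (long long) ((struct stat*) req.ptr)->st_size);
  }
  /* Releases the strdup'd path; req.ptr is left alone because it aliases the
   * embedded stat buffer (see uv_fs_req_cleanup() above). */
  uv_fs_req_cleanup(&req);
  return 0;
}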

View File

@ -1,297 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#if TARGET_OS_IPHONE
/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */
int uv__fsevents_init(uv_fs_event_t* handle) {
return 0;
}
int uv__fsevents_close(uv_fs_event_t* handle) {
return 0;
}
#else /* TARGET_OS_IPHONE */
#include <assert.h>
#include <stdlib.h>
#include <CoreServices/CoreServices.h>
typedef struct uv__fsevents_event_s uv__fsevents_event_t;
struct uv__fsevents_event_s {
int events;
ngx_queue_t member;
char path[1];
};
#define UV__FSEVENTS_WALK(handle, block) \
{ \
ngx_queue_t* curr; \
ngx_queue_t split_head; \
uv__fsevents_event_t* event; \
uv_mutex_lock(&(handle)->cf_mutex); \
ngx_queue_init(&split_head); \
if (!ngx_queue_empty(&(handle)->cf_events)) { \
ngx_queue_t* split_pos = ngx_queue_next(&(handle)->cf_events); \
ngx_queue_split(&(handle)->cf_events, split_pos, &split_head); \
} \
uv_mutex_unlock(&(handle)->cf_mutex); \
while (!ngx_queue_empty(&split_head)) { \
curr = ngx_queue_head(&split_head); \
/* Invoke callback */ \
event = ngx_queue_data(curr, uv__fsevents_event_t, member); \
ngx_queue_remove(curr); \
/* Invoke block code, but only if handle wasn't closed */ \
if (((handle)->flags & (UV_CLOSING | UV_CLOSED)) == 0) \
block \
/* Free allocated data */ \
free(event); \
} \
}
void uv__fsevents_cb(uv_async_t* cb, int status) {
uv_fs_event_t* handle;
handle = cb->data;
UV__FSEVENTS_WALK(handle, {
if (handle->event_watcher.fd != -1)
handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0);
});
if ((handle->flags & (UV_CLOSING | UV_CLOSED)) == 0 &&
handle->event_watcher.fd == -1) {
uv__fsevents_close(handle);
}
}
void uv__fsevents_event_cb(ConstFSEventStreamRef streamRef,
void* info,
size_t numEvents,
void* eventPaths,
const FSEventStreamEventFlags eventFlags[],
const FSEventStreamEventId eventIds[]) {
size_t i;
int len;
char** paths;
char* path;
char* pos;
uv_fs_event_t* handle;
uv__fsevents_event_t* event;
ngx_queue_t add_list;
int kFSEventsModified;
int kFSEventsRenamed;
kFSEventsModified = kFSEventStreamEventFlagItemFinderInfoMod |
kFSEventStreamEventFlagItemModified |
kFSEventStreamEventFlagItemInodeMetaMod |
kFSEventStreamEventFlagItemChangeOwner |
kFSEventStreamEventFlagItemXattrMod;
kFSEventsRenamed = kFSEventStreamEventFlagItemCreated |
kFSEventStreamEventFlagItemRemoved |
kFSEventStreamEventFlagItemRenamed;
handle = info;
paths = eventPaths;
ngx_queue_init(&add_list);
for (i = 0; i < numEvents; i++) {
/* Ignore system events */
if (eventFlags[i] & (kFSEventStreamEventFlagUserDropped |
kFSEventStreamEventFlagKernelDropped |
kFSEventStreamEventFlagEventIdsWrapped |
kFSEventStreamEventFlagHistoryDone |
kFSEventStreamEventFlagMount |
kFSEventStreamEventFlagUnmount |
kFSEventStreamEventFlagRootChanged)) {
continue;
}
/* TODO: Report errors */
path = paths[i];
len = strlen(path);
/* Remove absolute path prefix */
if (strstr(path, handle->realpath) == path) {
path += handle->realpath_len;
len -= handle->realpath_len;
/* Skip the path separator */
if (*path != 0) {
path++;
len--;
}
}
#ifdef MAC_OS_X_VERSION_10_7
/* Ignore events with path equal to directory itself */
if (len == 0)
continue;
#endif /* MAC_OS_X_VERSION_10_7 */
/* Do not emit events from subdirectories unless the recursive flag is set */
pos = strchr(path, '/');
if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 &&
pos != NULL &&
pos != path + 1)
continue;
#ifndef MAC_OS_X_VERSION_10_7
path = "";
len = 0;
#endif /* MAC_OS_X_VERSION_10_7 */
event = malloc(sizeof(*event) + len);
if (event == NULL)
break;
memcpy(event->path, path, len + 1);
if ((eventFlags[i] & kFSEventsModified) != 0 &&
(eventFlags[i] & kFSEventsRenamed) == 0)
event->events = UV_CHANGE;
else
event->events = UV_RENAME;
ngx_queue_insert_tail(&add_list, &event->member);
}
uv_mutex_lock(&handle->cf_mutex);
ngx_queue_add(&handle->cf_events, &add_list);
uv_mutex_unlock(&handle->cf_mutex);
uv_async_send(handle->cf_cb);
}
void uv__fsevents_schedule(void* arg) {
uv_fs_event_t* handle;
handle = arg;
FSEventStreamScheduleWithRunLoop(handle->cf_eventstream,
handle->loop->cf_loop,
kCFRunLoopDefaultMode);
FSEventStreamStart(handle->cf_eventstream);
uv_sem_post(&handle->cf_sem);
}
int uv__fsevents_init(uv_fs_event_t* handle) {
FSEventStreamContext ctx;
FSEventStreamRef ref;
CFStringRef path;
CFArrayRef paths;
CFAbsoluteTime latency;
FSEventStreamCreateFlags flags;
/* Initialize context */
ctx.version = 0;
ctx.info = handle;
ctx.retain = NULL;
ctx.release = NULL;
ctx.copyDescription = NULL;
/* Get absolute path to file */
handle->realpath = realpath(handle->filename, NULL);
if (handle->realpath != NULL)
handle->realpath_len = strlen(handle->realpath);
/* Initialize paths array */
path = CFStringCreateWithFileSystemRepresentation(NULL, handle->filename);
paths = CFArrayCreate(NULL, (const void**)&path, 1, NULL);
latency = 0.15;
/* Set appropriate flags */
flags = kFSEventStreamCreateFlagFileEvents;
ref = FSEventStreamCreate(NULL,
&uv__fsevents_event_cb,
&ctx,
paths,
kFSEventStreamEventIdSinceNow,
latency,
flags);
handle->cf_eventstream = ref;
/*
 * Events are delivered on another thread.
 * Initialize a callback that ferries them back into the event loop's thread.
 */
handle->cf_cb = malloc(sizeof(*handle->cf_cb));
if (handle->cf_cb == NULL)
return uv__set_sys_error(handle->loop, ENOMEM);
handle->cf_cb->data = handle;
uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
handle->cf_cb->flags |= UV__HANDLE_INTERNAL;
uv_unref((uv_handle_t*) handle->cf_cb);
uv_mutex_init(&handle->cf_mutex);
uv_sem_init(&handle->cf_sem, 0);
ngx_queue_init(&handle->cf_events);
uv__cf_loop_signal(handle->loop, uv__fsevents_schedule, handle);
return 0;
}
int uv__fsevents_close(uv_fs_event_t* handle) {
if (handle->cf_eventstream == NULL)
return -1;
/* Ensure that event stream was scheduled */
uv_sem_wait(&handle->cf_sem);
/* Stop emitting events */
FSEventStreamStop(handle->cf_eventstream);
/* Release stream */
FSEventStreamInvalidate(handle->cf_eventstream);
FSEventStreamRelease(handle->cf_eventstream);
handle->cf_eventstream = NULL;
uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) free);
/* Free data in queue */
UV__FSEVENTS_WALK(handle, {
/* NOP */
})
uv_mutex_destroy(&handle->cf_mutex);
uv_sem_destroy(&handle->cf_sem);
free(handle->realpath);
handle->realpath = NULL;
handle->realpath_len = 0;
return 0;
}
#endif /* TARGET_OS_IPHONE */

View File

@ -1,159 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stddef.h> /* NULL */
#include <stdlib.h>
#include <string.h>
static void uv__getaddrinfo_work(struct uv__work* w) {
uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
req->retcode = getaddrinfo(req->hostname,
req->service,
req->hints,
&req->res);
}
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
struct addrinfo *res = req->res;
#if defined(__sun)
size_t hostlen;
if (req->hostname)
hostlen = strlen(req->hostname);
else
hostlen = 0;
#endif
req->res = NULL;
uv__req_unregister(req->loop, req);
/* see initialization in uv_getaddrinfo() */
if (req->hints)
free(req->hints);
else if (req->service)
free(req->service);
else if (req->hostname)
free(req->hostname);
else
assert(0);
req->hints = NULL;
req->service = NULL;
req->hostname = NULL;
if (req->retcode == 0) {
/* OK */
#if defined(EAI_NODATA) /* FreeBSD deprecated EAI_NODATA */
} else if (req->retcode == EAI_NONAME || req->retcode == EAI_NODATA) {
#else
} else if (req->retcode == EAI_NONAME) {
#endif
uv__set_sys_error(req->loop, ENOENT); /* FIXME compatibility hack */
#if defined(__sun)
} else if (req->retcode == EAI_MEMORY && hostlen >= MAXHOSTNAMELEN) {
uv__set_sys_error(req->loop, ENOENT);
#endif
} else {
req->loop->last_err.code = UV_EADDRINFO;
req->loop->last_err.sys_errno_ = req->retcode;
}
if (status == -UV_ECANCELED) {
assert(req->retcode == 0);
req->retcode = UV_ECANCELED;
uv__set_artificial_error(req->loop, UV_ECANCELED);
}
req->cb(req, req->retcode, res);
}
int uv_getaddrinfo(uv_loop_t* loop,
uv_getaddrinfo_t* req,
uv_getaddrinfo_cb cb,
const char* hostname,
const char* service,
const struct addrinfo* hints) {
size_t hostname_len;
size_t service_len;
size_t hints_len;
size_t len;
char* buf;
if (req == NULL || cb == NULL || (hostname == NULL && service == NULL))
return uv__set_artificial_error(loop, UV_EINVAL);
hostname_len = hostname ? strlen(hostname) + 1 : 0;
service_len = service ? strlen(service) + 1 : 0;
hints_len = hints ? sizeof(*hints) : 0;
buf = malloc(hostname_len + service_len + hints_len);
if (buf == NULL)
return uv__set_artificial_error(loop, UV_ENOMEM);
uv__req_init(loop, req, UV_GETADDRINFO);
req->loop = loop;
req->cb = cb;
req->res = NULL;
req->hints = NULL;
req->service = NULL;
req->hostname = NULL;
req->retcode = 0;
/* order matters, see uv__getaddrinfo_done() */
len = 0;
if (hints) {
req->hints = memcpy(buf + len, hints, sizeof(*hints));
len += sizeof(*hints);
}
if (service) {
req->service = memcpy(buf + len, service, service_len);
len += service_len;
}
if (hostname) {
req->hostname = memcpy(buf + len, hostname, hostname_len);
len += hostname_len;
}
uv__work_submit(loop,
&req->work_req,
uv__getaddrinfo_work,
uv__getaddrinfo_done);
return 0;
}
void uv_freeaddrinfo(struct addrinfo* ai) {
if (ai)
freeaddrinfo(ai);
}
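A usage sketch for the resolver above; "example.com" is a placeholder host, and the two-argument uv_run() form is assumed from the 0.10-era uv.h.
#include <stdio.h>
#include <string.h>
#include <netdb.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "uv.h"
static void on_resolved(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
  char ip[INET6_ADDRSTRLEN];
  struct addrinfo* ai;
  if (status != 0)
    return;  /* res is NULL on failure, nothing to free */
  for (ai = res; ai != NULL; ai = ai->ai_next) {
    if (ai->ai_family == AF_INET)
      uv_ip4_name((struct sockaddr_in*) ai->ai_addr, ip, sizeof(ip));
    else if (ai->ai_family == AF_INET6)
      uv_ip6_name((struct sockaddr_in6*) ai->ai_addr, ip, sizeof(ip));
    else
      continue;
    printf("%s\n", ip);
  }
  uv_freeaddrinfo(res);
}
int main(void) {
  uv_getaddrinfo_t req;
  struct addrinfo hints;
  memset(&hints, 0, sizeof(hints));
  hints.ai_socktype = SOCK_STREAM;
  if (uv_getaddrinfo(uv_default_loop(), &req, on_resolved,
                     "example.com", NULL, &hints) != 0)
    return 1;
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);  /* assumed 0.10 signature */
}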

View File

@ -1,267 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_UNIX_INTERNAL_H_
#define UV_UNIX_INTERNAL_H_
#include "uv-common.h"
#include <assert.h>
#include <stdlib.h> /* abort */
#if defined(__STRICT_ANSI__)
# define inline __inline
#endif
#if defined(__linux__)
# include "linux-syscalls.h"
#endif /* __linux__ */
#if defined(__sun)
# include <sys/port.h>
# include <port.h>
# define futimes(fd, tv) futimesat(fd, (void*)0, tv)
#endif /* __sun */
#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <CoreServices/CoreServices.h>
#endif
#define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
#define ACCESS_ONCE(type, var) \
(*(volatile type*) &(var))
#define UNREACHABLE() \
do { \
assert(0 && "unreachable code"); \
abort(); \
} \
while (0)
#define SAVE_ERRNO(block) \
do { \
int _saved_errno = errno; \
do { block; } while (0); \
errno = _saved_errno; \
} \
while (0)
#if defined(__linux__)
# define UV__POLLIN UV__EPOLLIN
# define UV__POLLOUT UV__EPOLLOUT
# define UV__POLLERR UV__EPOLLERR
# define UV__POLLHUP UV__EPOLLHUP
#endif
#if defined(__sun)
# define UV__POLLIN POLLIN
# define UV__POLLOUT POLLOUT
# define UV__POLLERR POLLERR
# define UV__POLLHUP POLLHUP
#endif
#ifndef UV__POLLIN
# define UV__POLLIN 1
#endif
#ifndef UV__POLLOUT
# define UV__POLLOUT 2
#endif
#ifndef UV__POLLERR
# define UV__POLLERR 4
#endif
#ifndef UV__POLLHUP
# define UV__POLLHUP 8
#endif
/* handle flags */
enum {
UV_CLOSING = 0x01, /* uv_close() called but not finished. */
UV_CLOSED = 0x02, /* close(2) finished. */
UV_STREAM_READING = 0x04, /* uv_read_start() called. */
UV_STREAM_SHUTTING = 0x08, /* uv_shutdown() called but not complete. */
UV_STREAM_SHUT = 0x10, /* Write side closed. */
UV_STREAM_READABLE = 0x20, /* The stream is readable */
UV_STREAM_WRITABLE = 0x40, /* The stream is writable */
UV_STREAM_BLOCKING = 0x80, /* Synchronous writes. */
UV_TCP_NODELAY = 0x100, /* Disable Nagle. */
UV_TCP_KEEPALIVE = 0x200, /* Turn on keep-alive. */
UV_TCP_SINGLE_ACCEPT = 0x400 /* Only accept() when idle. */
};
/* core */
int uv__nonblock(int fd, int set);
int uv__cloexec(int fd, int set);
int uv__socket(int domain, int type, int protocol);
int uv__dup(int fd);
void uv__make_close_pending(uv_handle_t* handle);
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_close(uv_loop_t* loop, uv__io_t* w);
void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
int uv__io_active(const uv__io_t* w, unsigned int events);
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
/* async */
void uv__async_send(struct uv__async* wa);
void uv__async_init(struct uv__async* wa);
int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb);
void uv__async_stop(uv_loop_t* loop, struct uv__async* wa);
/* loop */
int uv__loop_init(uv_loop_t* loop, int default_loop);
void uv__loop_delete(uv_loop_t* loop);
void uv__run_idle(uv_loop_t* loop);
void uv__run_check(uv_loop_t* loop);
void uv__run_prepare(uv_loop_t* loop);
/* error */
uv_err_code uv_translate_sys_error(int sys_errno);
void uv_fatal_error(const int errorno, const char* syscall);
/* stream */
void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
uv_handle_type type);
int uv__stream_open(uv_stream_t*, int fd, int flags);
void uv__stream_destroy(uv_stream_t* stream);
#if defined(__APPLE__)
int uv__stream_try_select(uv_stream_t* stream, int* fd);
#endif /* defined(__APPLE__) */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv__accept(int sockfd);
/* tcp */
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_nodelay(int fd, int on);
int uv__tcp_keepalive(int fd, int on, unsigned int delay);
/* pipe */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
/* timer */
void uv__run_timers(uv_loop_t* loop);
int uv__next_timeout(const uv_loop_t* loop);
/* signal */
void uv__signal_close(uv_signal_t* handle);
void uv__signal_global_once_init(void);
void uv__signal_loop_cleanup(uv_loop_t* loop);
/* thread pool */
void uv__work_submit(uv_loop_t* loop,
struct uv__work *w,
void (*work)(struct uv__work *w),
void (*done)(struct uv__work *w, int status));
void uv__work_done(uv_async_t* handle, int status);
/* platform specific */
uint64_t uv__hrtime(void);
int uv__kqueue_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop, int default_loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
/* various */
void uv__async_close(uv_async_t* handle);
void uv__check_close(uv_check_t* handle);
void uv__fs_event_close(uv_fs_event_t* handle);
void uv__idle_close(uv_idle_t* handle);
void uv__pipe_close(uv_pipe_t* handle);
void uv__poll_close(uv_poll_t* handle);
void uv__prepare_close(uv_prepare_t* handle);
void uv__process_close(uv_process_t* handle);
void uv__stream_close(uv_stream_t* handle);
void uv__tcp_close(uv_tcp_t* handle);
void uv__timer_close(uv_timer_t* handle);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
#if defined(__APPLE__)
int uv___stream_fd(uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((uv_stream_t*) (handle)))
#else
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */
#ifdef UV__O_NONBLOCK
# define UV__F_NONBLOCK UV__O_NONBLOCK
#else
# define UV__F_NONBLOCK 1
#endif
int uv__make_socketpair(int fds[2], int flags);
int uv__make_pipe(int fds[2], int flags);
#if defined(__APPLE__)
typedef void (*cf_loop_signal_cb)(void*);
void uv__cf_loop_signal(uv_loop_t* loop, cf_loop_signal_cb cb, void* arg);
int uv__fsevents_init(uv_fs_event_t* handle);
int uv__fsevents_close(uv_fs_event_t* handle);
/* OSX < 10.7 has no file events, polyfill them */
#ifndef MAC_OS_X_VERSION_10_7
static const int kFSEventStreamCreateFlagFileEvents = 0x00000010;
static const int kFSEventStreamEventFlagItemCreated = 0x00000100;
static const int kFSEventStreamEventFlagItemRemoved = 0x00000200;
static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400;
static const int kFSEventStreamEventFlagItemRenamed = 0x00000800;
static const int kFSEventStreamEventFlagItemModified = 0x00001000;
static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000;
static const int kFSEventStreamEventFlagItemChangeOwner = 0x00004000;
static const int kFSEventStreamEventFlagItemXattrMod = 0x00008000;
static const int kFSEventStreamEventFlagItemIsFile = 0x00010000;
static const int kFSEventStreamEventFlagItemIsDir = 0x00020000;
static const int kFSEventStreamEventFlagItemIsSymlink = 0x00040000;
#endif /* MAC_OS_X_VERSION_10_7 */
#endif /* defined(__APPLE__) */
__attribute__((unused))
static void uv__req_init(uv_loop_t* loop, uv_req_t* req, uv_req_type type) {
req->type = type;
uv__req_register(loop, req);
}
#define uv__req_init(loop, req, type) \
uv__req_init((loop), (uv_req_t*)(req), (type))
__attribute__((unused))
static void uv__update_time(uv_loop_t* loop) {
loop->time = uv__hrtime() / 1000000;
}
#ifdef HAVE_DTRACE
#include "uv-dtrace.h"
#else
#define UV_TICK_START(arg0, arg1)
#define UV_TICK_STOP(arg0, arg1)
#endif
#endif /* UV_UNIX_INTERNAL_H_ */
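A small sketch of the SAVE_ERRNO helper above; uv__close_quietly() is a hypothetical name and the include assumes an in-tree compilation unit.
#include <errno.h>
#include <unistd.h>
#include "internal.h"
/* Close a descriptor without clobbering a pending errno value, the same
 * pattern the pollers use around uv__update_time(). */
void uv__close_quietly(int fd) {
  SAVE_ERRNO(close(fd));
}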

View File

@ -1,374 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
int uv__kqueue_init(uv_loop_t* loop) {
loop->backend_fd = kqueue();
if (loop->backend_fd == -1)
return -1;
uv__cloexec(loop->backend_fd, 1);
return 0;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct kevent events[1024];
struct kevent* ev;
struct timespec spec;
unsigned int nevents;
unsigned int revents;
ngx_queue_t* q;
uint64_t base;
uint64_t diff;
uv__io_t* w;
int filter;
int fflags;
int count;
int nfds;
int fd;
int op;
int i;
if (loop->nfds == 0) {
assert(ngx_queue_empty(&loop->watcher_queue));
return;
}
nevents = 0;
while (!ngx_queue_empty(&loop->watcher_queue)) {
q = ngx_queue_head(&loop->watcher_queue);
ngx_queue_remove(q);
ngx_queue_init(q);
w = ngx_queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
filter = EVFILT_READ;
fflags = 0;
op = EV_ADD;
if (w->cb == uv__fs_event) {
filter = EVFILT_VNODE;
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
}
EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
abort();
nevents = 0;
}
}
if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
abort();
nevents = 0;
}
}
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;; nevents = 0) {
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
}
nfds = kevent(loop->backend_fd,
events,
nevents,
events,
ARRAY_SIZE(events),
timeout == -1 ? NULL : &spec);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
if (timeout == 0)
return;
if (timeout == -1)
continue;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
ev = events + i;
fd = ev->ident;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it. */
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != EBADF && errno != ENOENT)
abort();
continue;
}
if (ev->filter == EVFILT_VNODE) {
assert(w->events == UV__POLLIN);
assert(w->pevents == UV__POLLIN);
w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
nevents++;
continue;
}
revents = 0;
if (ev->filter == EVFILT_READ) {
if (w->pevents & UV__POLLIN) {
revents |= UV__POLLIN;
w->rcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
}
if (ev->filter == EVFILT_WRITE) {
if (w->pevents & UV__POLLOUT) {
revents |= UV__POLLOUT;
w->wcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
}
if (ev->flags & EV_ERROR)
revents |= UV__POLLERR;
if (revents == 0)
continue;
w->cb(loop, w, revents);
nevents++;
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
timeout -= diff;
}
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct kevent* events;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct kevent*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events == NULL)
return;
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].ident == fd)
events[i].ident = -1;
}
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
uv_fs_event_t* handle;
struct kevent ev;
int events;
handle = container_of(w, uv_fs_event_t, event_watcher);
if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
events = UV_CHANGE;
else
events = UV_RENAME;
handle->cb(handle, NULL, events, 0);
if (handle->event_watcher.fd == -1)
return;
/* Watcher operates in one-shot mode, re-arm it. */
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
abort();
}
int uv_fs_event_init(uv_loop_t* loop,
uv_fs_event_t* handle,
const char* filename,
uv_fs_event_cb cb,
int flags) {
#if defined(__APPLE__)
struct stat statbuf;
#endif /* defined(__APPLE__) */
int fd;
/* TODO open asynchronously - but how do we report back errors? */
if ((fd = open(filename, O_RDONLY)) == -1) {
uv__set_sys_error(loop, errno);
return -1;
}
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
uv__handle_start(handle); /* FIXME shouldn't start automatically */
uv__io_init(&handle->event_watcher, uv__fs_event, fd);
handle->filename = strdup(filename);
handle->cb = cb;
#if defined(__APPLE__)
/* Nullify fields so the close path can check them later */
handle->cf_eventstream = NULL;
handle->realpath = NULL;
handle->realpath_len = 0;
handle->cf_flags = flags;
if (fstat(fd, &statbuf))
goto fallback;
/* FSEvents works only with directories */
if (!(statbuf.st_mode & S_IFDIR))
goto fallback;
return uv__fsevents_init(handle);
fallback:
#endif /* defined(__APPLE__) */
uv__io_start(loop, &handle->event_watcher, UV__POLLIN);
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
#if defined(__APPLE__)
if (uv__fsevents_close(handle))
uv__io_stop(handle->loop, &handle->event_watcher, UV__POLLIN);
#else
uv__io_stop(handle->loop, &handle->event_watcher, UV__POLLIN);
#endif /* defined(__APPLE__) */
uv__handle_stop(handle);
free(handle->filename);
handle->filename = NULL;
close(handle->event_watcher.fd);
handle->event_watcher.fd = -1;
}
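A usage sketch for the watcher set up by uv_fs_event_init() above; "." is a placeholder path and the two-argument uv_run() form is assumed from the 0.10-era uv.h.
#include <stdio.h>
#include "uv.h"
static void on_fs_event(uv_fs_event_t* handle, const char* filename,
                        int events, int status) {
  (void) handle;
  (void) status;
  /* events is UV_RENAME or UV_CHANGE, mirroring uv__fs_event() above. */
  printf("%s: %s\n",
         events == UV_RENAME ? "rename" : "change",
         filename != NULL ? filename : "(watched path)");
}
int main(void) {
  uv_fs_event_t handle;
  if (uv_fs_event_init(uv_default_loop(), &handle, ".", on_fs_event, 0) != 0)
    return 1;
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);  /* assumed 0.10 signature */
}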

View File

@ -1,763 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <net/if.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#define HAVE_IFADDRS_H 1
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 || __UCLIBC_MINOR__ < 9 || __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
# endif
#endif
#ifdef HAVE_IFADDRS_H
# include <ifaddrs.h>
#endif
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
* include that file because it conflicts with <time.h>. We'll just have to
* define it ourselves.
*/
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(unsigned int numcpus, uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static unsigned long read_cpufreq(unsigned int cpunum);
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
int fd;
fd = uv__epoll_create1(UV__EPOLL_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the EPOLL_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = uv__epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
}
loop->backend_fd = fd;
loop->inotify_fd = -1;
loop->inotify_watchers = NULL;
if (fd == -1)
return -1;
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->inotify_fd == -1) return;
uv__io_stop(loop, &loop->inotify_read_watcher, UV__POLLIN);
close(loop->inotify_fd);
loop->inotify_fd = -1;
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct uv__epoll_event* events;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events == NULL)
return;
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].data == fd)
events[i].data = -1;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct uv__epoll_event events[1024];
struct uv__epoll_event* pe;
struct uv__epoll_event e;
ngx_queue_t* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
int nevents;
int count;
int nfds;
int fd;
int op;
int i;
if (loop->nfds == 0) {
assert(ngx_queue_empty(&loop->watcher_queue));
return;
}
while (!ngx_queue_empty(&loop->watcher_queue)) {
q = ngx_queue_head(&loop->watcher_queue);
ngx_queue_remove(q);
ngx_queue_init(q);
w = ngx_queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data = w->fd;
if (w->events == 0)
op = UV__EPOLL_CTL_ADD;
else
op = UV__EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == UV__EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
nfds = uv__epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
continue;
}
      /* Give users only events they're interested in. Prevents spurious
       * callbacks when a previous callback invocation in this loop has stopped
       * the current watcher. Also filters out events that the user has not
       * asked us to watch.
       */
pe->events &= w->pevents | UV__POLLERR | UV__POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == UV__EPOLLERR || pe->events == UV__EPOLLHUP)
pe->events |= w->pevents & (UV__EPOLLIN | UV__EPOLLOUT);
if (pe->events != 0) {
w->cb(loop, w, pe->events);
nevents++;
}
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
timeout -= diff;
}
}
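
A small worked example of the error/hangup merge described in the comment block above, with the epoll flag values mirrored from the linux-syscalls.h removed further down in this commit (EPOLLIN=1, EPOLLOUT=4, EPOLLERR=8, EPOLLHUP=16); it demonstrates only the bit arithmetic, not the poll loop itself.

#include <stdio.h>

/* Values mirrored from the removed linux-syscalls.h below. */
#define UV__EPOLLIN   1
#define UV__EPOLLOUT  4
#define UV__EPOLLERR  8
#define UV__EPOLLHUP  16

int main(void) {
  unsigned int pevents = UV__EPOLLIN;   /* the watcher asked for readability */
  unsigned int events  = UV__EPOLLHUP;  /* the kernel reports only a hangup  */

  /* Step 1: keep only requested events plus error/hangup (still 16 here). */
  events &= pevents | UV__EPOLLERR | UV__EPOLLHUP;

  /* Step 2: a bare ERR/HUP gets the watcher's read/write interest merged
   * back in, so the stream callback still runs and observes EOF or the
   * error through the usual read()/write() path. */
  if (events == UV__EPOLLERR || events == UV__EPOLLHUP)
    events |= pevents & (UV__EPOLLIN | UV__EPOLLOUT);

  printf("dispatched events: 0x%x\n", events);  /* 0x11 = EPOLLHUP|EPOLLIN */
  return 0;
}
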
uint64_t uv__hrtime(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
void uv_loadavg(double avg[3]) {
struct sysinfo info;
if (sysinfo(&info) < 0) return;
avg[0] = (double) info.loads[0] / 65536.0;
avg[1] = (double) info.loads[1] / 65536.0;
avg[2] = (double) info.loads[2] / 65536.0;
}
int uv_exepath(char* buffer, size_t* size) {
ssize_t n;
if (!buffer || !size) {
return -1;
}
n = readlink("/proc/self/exe", buffer, *size - 1);
if (n <= 0) return -1;
buffer[n] = '\0';
*size = n;
return 0;
}
uint64_t uv_get_free_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
uint64_t uv_get_total_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
uv_err_t uv_resident_set_memory(size_t* rss) {
char buf[1024];
const char* s;
ssize_t n;
long val;
int fd;
int i;
do
fd = open("/proc/self/stat", O_RDONLY);
while (fd == -1 && errno == EINTR);
if (fd == -1)
return uv__new_sys_error(errno);
do
n = read(fd, buf, sizeof(buf) - 1);
while (n == -1 && errno == EINTR);
SAVE_ERRNO(close(fd));
if (n == -1)
return uv__new_sys_error(errno);
buf[n] = '\0';
s = strchr(buf, ' ');
if (s == NULL)
goto err;
s += 1;
if (*s != '(')
goto err;
s = strchr(s, ')');
if (s == NULL)
goto err;
for (i = 1; i <= 22; i++) {
s = strchr(s + 1, ' ');
if (s == NULL)
goto err;
}
errno = 0;
val = strtol(s, NULL, 10);
if (errno != 0)
goto err;
if (val < 0)
goto err;
*rss = val * getpagesize();
return uv_ok_;
err:
return uv__new_artificial_error(UV_EINVAL);
}
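
The field skipping above is easier to follow against a concrete /proc/self/stat line: after locating ')' (the end of the parenthesised comm field), 22 space-to-space hops land on the separator before field 24, the resident set size in pages, which is why the result is scaled by the page size. A minimal sketch with an illustrative, shortened stat line; the error checks of the real function are omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  /* Illustrative line; a real /proc/self/stat has more fields after rss. */
  const char* buf =
      "1234 (a.out) R 1 1234 1234 0 -1 4194560 100 0 0 0 "
      "1 1 0 0 20 0 1 0 5000 10240000 321 18446744073709551615";
  const char* s;
  long rss_pages;
  int i;

  s = strchr(buf, ')');          /* end of "(comm)", i.e. field 2 */
  for (i = 1; i <= 22; i++)      /* hop to the separator before field 24 */
    s = strchr(s + 1, ' ');
  rss_pages = strtol(s, NULL, 10);
  printf("rss = %ld pages\n", rss_pages);  /* prints 321 for this line */
  return 0;
}
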
uv_err_t uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
struct timespec now;
int r;
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
goto retry;
}
if (r)
return uv__new_sys_error(errno);
*uptime = now.tv_sec;
*uptime += (double)now.tv_nsec / 1000000000.0;
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int numcpus;
uv_cpu_info_t* ci;
*cpu_infos = NULL;
*count = 0;
numcpus = sysconf(_SC_NPROCESSORS_ONLN);
assert(numcpus != (unsigned int) -1);
assert(numcpus != 0);
ci = calloc(numcpus, sizeof(*ci));
if (ci == NULL)
return uv__new_sys_error(ENOMEM);
if (read_models(numcpus, ci)) {
SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
return uv__new_sys_error(errno);
}
if (read_times(numcpus, ci)) {
SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
return uv__new_sys_error(errno);
}
/* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
* We don't check for errors here. Worst case, the field is left zero.
*/
if (ci[0].speed == 0)
read_speeds(numcpus, ci);
*cpu_infos = ci;
*count = numcpus;
return uv_ok_;
}
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
unsigned int num;
for (num = 0; num < numcpus; num++)
ci[num].speed = read_cpufreq(num) / 1000;
}
/* Also reads the CPU frequency on x86. The other architectures only have
 * a BogoMIPS field, which may not be very accurate.
 *
 * Note: simply returns on error; uv_cpu_info() takes care of the cleanup.
 */
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
char buf[1024];
char* model;
FILE* fp;
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
(void) &model_marker;
(void) &speed_marker;
(void) &speed_idx;
(void) &model;
(void) &buf;
(void) &fp;
model_idx = 0;
speed_idx = 0;
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
defined(__x86_64__)
fp = fopen("/proc/cpuinfo", "r");
if (fp == NULL)
return -1;
while (fgets(buf, sizeof(buf), fp)) {
if (model_idx < numcpus) {
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return -1;
}
ci[model_idx++].model = model;
continue;
}
}
#if defined(__arm__) || defined(__mips__)
if (model_idx < numcpus) {
#if defined(__arm__)
/* Fallback for pre-3.8 kernels. */
static const char model_marker[] = "Processor\t: ";
#else /* defined(__mips__) */
static const char model_marker[] = "cpu model\t\t: ";
#endif
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return -1;
}
ci[model_idx++].model = model;
continue;
}
}
#else /* !__arm__ && !__mips__ */
if (speed_idx < numcpus) {
if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
continue;
}
}
#endif /* __arm__ || __mips__ */
}
fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
  /* Now we want to make sure that all the models contain *something* because
   * it's not safe to leave them as null. Copy the last entry unless there
   * isn't one, in which case we simply put "unknown" into everything.
   */
inferred_model = "unknown";
if (model_idx > 0)
inferred_model = ci[model_idx - 1].model;
while (model_idx < numcpus) {
model = strndup(inferred_model, strlen(inferred_model));
if (model == NULL)
return -1;
ci[model_idx++].model = model;
}
return 0;
}
static int read_times(unsigned int numcpus, uv_cpu_info_t* ci) {
unsigned long clock_ticks;
struct uv_cpu_times_s ts;
unsigned long user;
unsigned long nice;
unsigned long sys;
unsigned long idle;
unsigned long dummy;
unsigned long irq;
unsigned int num;
unsigned int len;
char buf[1024];
FILE* fp;
clock_ticks = sysconf(_SC_CLK_TCK);
assert(clock_ticks != (unsigned long) -1);
assert(clock_ticks != 0);
fp = fopen("/proc/stat", "r");
if (fp == NULL)
return -1;
if (!fgets(buf, sizeof(buf), fp))
abort();
num = 0;
while (fgets(buf, sizeof(buf), fp)) {
if (num >= numcpus)
break;
if (strncmp(buf, "cpu", 3))
break;
/* skip "cpu<num> " marker */
{
unsigned int n;
int r = sscanf(buf, "cpu%u ", &n);
assert(r == 1);
(void) r; /* silence build warning */
for (len = sizeof("cpu0"); n /= 10; len++);
}
/* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
* guest, guest_nice but we're only interested in the first four + irq.
*
     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
     * fields; they're not allowed in C89 mode.
*/
if (6 != sscanf(buf + len,
"%lu %lu %lu %lu %lu %lu",
&user,
&nice,
&sys,
&idle,
&dummy,
&irq))
abort();
ts.user = clock_ticks * user;
ts.nice = clock_ticks * nice;
ts.sys = clock_ticks * sys;
ts.idle = clock_ticks * idle;
ts.irq = clock_ticks * irq;
ci[num++].cpu_times = ts;
}
fclose(fp);
assert(num == numcpus);
return 0;
}
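
The terse prefix computation above, "for (len = sizeof(\"cpu0\"); n /= 10; len++);", just measures how many characters the "cpu<num> " marker occupies: sizeof("cpu0") is 5 (it counts the terminating NUL), which equals strlen("cpu0 ") for a single-digit CPU number, and every extra decimal digit adds one. A standalone check of that equivalence, with the CPU numbers picked arbitrarily:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  unsigned int cpus[] = { 0, 7, 12, 128, 1024 };
  size_t i;

  for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
    unsigned int n = cpus[i];
    unsigned int len;
    char marker[32];

    for (len = sizeof("cpu0"); n /= 10; len++)
      ;  /* same loop as read_times() above */

    snprintf(marker, sizeof(marker), "cpu%u ", cpus[i]);
    assert(len == strlen(marker));  /* buf + len lands on the first counter */
    printf("cpu%u -> marker length %u\n", cpus[i], len);
  }
  return 0;
}
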
static unsigned long read_cpufreq(unsigned int cpunum) {
unsigned long val;
char buf[1024];
FILE* fp;
snprintf(buf,
sizeof(buf),
"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
cpunum);
fp = fopen(buf, "r");
if (fp == NULL)
return 0;
if (fscanf(fp, "%lu", &val) != 1)
val = 0;
fclose(fp);
return val;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
#ifndef HAVE_IFADDRS_H
return uv__new_artificial_error(UV_ENOSYS);
#else
struct ifaddrs *addrs, *ent;
char ip[INET6_ADDRSTRLEN];
uv_interface_address_t* address;
if (getifaddrs(&addrs) != 0) {
return uv__new_sys_error(errno);
}
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == PF_PACKET)) {
continue;
}
(*count)++;
}
*addresses = (uv_interface_address_t*)
malloc(*count * sizeof(uv_interface_address_t));
if (!(*addresses)) {
return uv__new_artificial_error(UV_ENOMEM);
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
bzero(&ip, sizeof (ip));
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING)) {
continue;
}
if (ent->ifa_addr == NULL) {
continue;
}
/*
* On Linux getifaddrs returns information related to the raw underlying
* devices. We're not interested in this information.
*/
if (ent->ifa_addr->sa_family == PF_PACKET) {
continue;
}
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6 *)ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in *)ent->ifa_addr);
}
address->is_internal = ent->ifa_flags & IFF_LOOPBACK ? 1 : 0;
address++;
}
freeifaddrs(addrs);
return uv_ok_;
#endif
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}
void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
#endif
}

View File

@ -1,239 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "tree.h"
#include "internal.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>
struct watcher_list {
RB_ENTRY(watcher_list) entry;
ngx_queue_t watchers;
char* path;
int wd;
};
struct watcher_root {
struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))
/* Don't look aghast, this is exactly how glibc's basename() works. */
static char* basename_r(const char* path) {
char* s = strrchr(path, '/');
return s ? (s + 1) : (char*)path;
}
static int compare_watchers(const struct watcher_list* a,
const struct watcher_list* b) {
if (a->wd < b->wd) return -1;
if (a->wd > b->wd) return 1;
return 0;
}
RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents);
static int new_inotify_fd(void) {
int fd;
fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC);
if (fd != -1)
return fd;
if (errno != ENOSYS)
return -1;
if ((fd = uv__inotify_init()) == -1)
return -1;
if (uv__cloexec(fd, 1) || uv__nonblock(fd, 1)) {
SAVE_ERRNO(close(fd));
return -1;
}
return fd;
}
static int init_inotify(uv_loop_t* loop) {
if (loop->inotify_fd != -1)
return 0;
loop->inotify_fd = new_inotify_fd();
if (loop->inotify_fd == -1) {
uv__set_sys_error(loop, errno);
return -1;
}
uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
uv__io_start(loop, &loop->inotify_read_watcher, UV__POLLIN);
return 0;
}
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
struct watcher_list w;
w.wd = wd;
return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* dummy,
unsigned int events) {
const struct uv__inotify_event* e;
struct watcher_list* w;
uv_fs_event_t* h;
ngx_queue_t* q;
const char* path;
ssize_t size;
const char *p;
/* needs to be large enough for sizeof(inotify_event) + strlen(filename) */
char buf[4096];
while (1) {
do
size = read(loop->inotify_fd, buf, sizeof(buf));
while (size == -1 && errno == EINTR);
if (size == -1) {
assert(errno == EAGAIN || errno == EWOULDBLOCK);
break;
}
assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
/* Now we have one or more inotify_event structs. */
for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
e = (const struct uv__inotify_event*)p;
events = 0;
if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
events |= UV_CHANGE;
if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
events |= UV_RENAME;
w = find_watcher(loop, e->wd);
if (w == NULL)
continue; /* Stale event, no watchers left. */
/* inotify does not return the filename when monitoring a single file
* for modifications. Repurpose the filename for API compatibility.
* I'm not convinced this is a good thing, maybe it should go.
*/
path = e->len ? (const char*) (e + 1) : basename_r(w->path);
ngx_queue_foreach(q, &w->watchers) {
h = ngx_queue_data(q, uv_fs_event_t, watchers);
h->cb(h, path, events, 0);
}
}
}
}
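
A minimal sketch of the variable-length record layout that the read loop above walks: each record is a fixed uv__inotify_event header (mirrored here from the linux-syscalls.h removed further down) followed by e->len bytes of optional, NUL-padded name, so the cursor always advances by sizeof(*e) + e->len. The hand-packed buffer and mask values below are purely illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrored from the removed linux-syscalls.h below. */
struct uv__inotify_event {
  int32_t wd;
  uint32_t mask;
  uint32_t cookie;
  uint32_t len;
  /* char name[]; present only when the watch target is a directory */
};

int main(void) {
  char buf[4096];
  char* q = buf;
  const char* p;
  const struct uv__inotify_event* ev = NULL;
  struct uv__inotify_event e;
  size_t total;

  /* Record 1: entry "a.txt" modified inside a watched directory;
   * len covers the name plus its NUL padding. */
  memset(&e, 0, sizeof(e));
  e.wd = 1; e.mask = 0x002 /* UV__IN_MODIFY */; e.len = 8;
  memcpy(q, &e, sizeof(e));
  memcpy(q + sizeof(e), "a.txt\0\0\0", 8);
  q += sizeof(e) + e.len;

  /* Record 2: a watch on a single file carries no name payload (len == 0). */
  memset(&e, 0, sizeof(e));
  e.wd = 2; e.mask = 0x004 /* UV__IN_ATTRIB */; e.len = 0;
  memcpy(q, &e, sizeof(e));
  q += sizeof(e);

  total = (size_t) (q - buf);

  /* Same cursor arithmetic as uv__inotify_read() above. */
  for (p = buf; p < buf + total; p += sizeof(*ev) + ev->len) {
    ev = (const struct uv__inotify_event*) p;
    printf("wd=%d mask=0x%x name=%s\n",
           (int) ev->wd,
           (unsigned) ev->mask,
           ev->len != 0 ? (const char*) (ev + 1) : "(none)");
  }
  return 0;
}
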
int uv_fs_event_init(uv_loop_t* loop,
uv_fs_event_t* handle,
const char* path,
uv_fs_event_cb cb,
int flags) {
struct watcher_list* w;
int events;
int wd;
size_t pathsz;
if (init_inotify(loop)) return -1;
events = UV__IN_ATTRIB
| UV__IN_CREATE
| UV__IN_MODIFY
| UV__IN_DELETE
| UV__IN_DELETE_SELF
| UV__IN_MOVE_SELF
| UV__IN_MOVED_FROM
| UV__IN_MOVED_TO;
wd = uv__inotify_add_watch(loop->inotify_fd, path, events);
if (wd == -1)
return uv__set_sys_error(loop, errno);
w = find_watcher(loop, wd);
if (w)
goto no_insert;
pathsz = strlen(path) + 1;
w = malloc(sizeof(*w) + pathsz);
if (w == NULL)
return uv__set_sys_error(loop, ENOMEM);
w->wd = wd;
uv_strlcpy((char*)(w + 1), path, pathsz);
w->path = (char*)(w + 1);
ngx_queue_init(&w->watchers);
RB_INSERT(watcher_root, CAST(&loop->inotify_watchers), w);
no_insert:
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
uv__handle_start(handle); /* FIXME shouldn't start automatically */
ngx_queue_insert_tail(&w->watchers, &handle->watchers);
handle->filename = w->path;
handle->cb = cb;
handle->wd = wd;
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
struct watcher_list* w;
w = find_watcher(handle->loop, handle->wd);
assert(w != NULL);
handle->wd = -1;
handle->filename = NULL;
uv__handle_stop(handle);
ngx_queue_remove(&handle->watchers);
if (ngx_queue_empty(&w->watchers)) {
/* No watchers left for this path. Clean up. */
RB_REMOVE(watcher_root, CAST(&handle->loop->inotify_watchers), w);
uv__inotify_rm_watch(handle->loop->inotify_fd, w->wd);
free(w);
}
}

View File

@ -1,388 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "linux-syscalls.h"
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>
#if defined(__i386__)
# ifndef __NR_socketcall
# define __NR_socketcall 102
# endif
#endif
#if defined(__arm__)
# if defined(__thumb__) || defined(__ARM_EABI__)
# define UV_SYSCALL_BASE 0
# else
# define UV_SYSCALL_BASE 0x900000
# endif
#endif /* __arm__ */
#ifndef __NR_accept4
# if defined(__x86_64__)
# define __NR_accept4 288
# elif defined(__i386__)
/* Nothing. Handled through socketcall(). */
# elif defined(__arm__)
# define __NR_accept4 (UV_SYSCALL_BASE + 366)
# endif
#endif /* __NR_accept4 */
#ifndef __NR_eventfd
# if defined(__x86_64__)
# define __NR_eventfd 284
# elif defined(__i386__)
# define __NR_eventfd 323
# elif defined(__arm__)
# define __NR_eventfd (UV_SYSCALL_BASE + 351)
# endif
#endif /* __NR_eventfd */
#ifndef __NR_eventfd2
# if defined(__x86_64__)
# define __NR_eventfd2 290
# elif defined(__i386__)
# define __NR_eventfd2 328
# elif defined(__arm__)
# define __NR_eventfd2 (UV_SYSCALL_BASE + 356)
# endif
#endif /* __NR_eventfd2 */
#ifndef __NR_epoll_create
# if defined(__x86_64__)
# define __NR_epoll_create 213
# elif defined(__i386__)
# define __NR_epoll_create 254
# elif defined(__arm__)
# define __NR_epoll_create (UV_SYSCALL_BASE + 250)
# endif
#endif /* __NR_epoll_create */
#ifndef __NR_epoll_create1
# if defined(__x86_64__)
# define __NR_epoll_create1 291
# elif defined(__i386__)
# define __NR_epoll_create1 329
# elif defined(__arm__)
# define __NR_epoll_create1 (UV_SYSCALL_BASE + 357)
# endif
#endif /* __NR_epoll_create1 */
#ifndef __NR_epoll_ctl
# if defined(__x86_64__)
# define __NR_epoll_ctl 233 /* used to be 214 */
# elif defined(__i386__)
# define __NR_epoll_ctl 255
# elif defined(__arm__)
# define __NR_epoll_ctl (UV_SYSCALL_BASE + 251)
# endif
#endif /* __NR_epoll_ctl */
#ifndef __NR_epoll_wait
# if defined(__x86_64__)
# define __NR_epoll_wait 232 /* used to be 215 */
# elif defined(__i386__)
# define __NR_epoll_wait 256
# elif defined(__arm__)
# define __NR_epoll_wait (UV_SYSCALL_BASE + 252)
# endif
#endif /* __NR_epoll_wait */
#ifndef __NR_epoll_pwait
# if defined(__x86_64__)
# define __NR_epoll_pwait 281
# elif defined(__i386__)
# define __NR_epoll_pwait 319
# elif defined(__arm__)
# define __NR_epoll_pwait (UV_SYSCALL_BASE + 346)
# endif
#endif /* __NR_epoll_pwait */
#ifndef __NR_inotify_init
# if defined(__x86_64__)
# define __NR_inotify_init 253
# elif defined(__i386__)
# define __NR_inotify_init 291
# elif defined(__arm__)
# define __NR_inotify_init (UV_SYSCALL_BASE + 316)
# endif
#endif /* __NR_inotify_init */
#ifndef __NR_inotify_init1
# if defined(__x86_64__)
# define __NR_inotify_init1 294
# elif defined(__i386__)
# define __NR_inotify_init1 332
# elif defined(__arm__)
# define __NR_inotify_init1 (UV_SYSCALL_BASE + 360)
# endif
#endif /* __NR_inotify_init1 */
#ifndef __NR_inotify_add_watch
# if defined(__x86_64__)
# define __NR_inotify_add_watch 254
# elif defined(__i386__)
# define __NR_inotify_add_watch 292
# elif defined(__arm__)
# define __NR_inotify_add_watch (UV_SYSCALL_BASE + 317)
# endif
#endif /* __NR_inotify_add_watch */
#ifndef __NR_inotify_rm_watch
# if defined(__x86_64__)
# define __NR_inotify_rm_watch 255
# elif defined(__i386__)
# define __NR_inotify_rm_watch 293
# elif defined(__arm__)
# define __NR_inotify_rm_watch (UV_SYSCALL_BASE + 318)
# endif
#endif /* __NR_inotify_rm_watch */
#ifndef __NR_pipe2
# if defined(__x86_64__)
# define __NR_pipe2 293
# elif defined(__i386__)
# define __NR_pipe2 331
# elif defined(__arm__)
# define __NR_pipe2 (UV_SYSCALL_BASE + 359)
# endif
#endif /* __NR_pipe2 */
#ifndef __NR_recvmmsg
# if defined(__x86_64__)
# define __NR_recvmmsg 299
# elif defined(__i386__)
# define __NR_recvmmsg 337
# elif defined(__arm__)
# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
# endif
#endif /* __NR_recvmmsg */
#ifndef __NR_sendmmsg
# if defined(__x86_64__)
# define __NR_sendmmsg 307
# elif defined(__i386__)
# define __NR_sendmmsg 345
# elif defined(__arm__)
# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
# endif
#endif /* __NR_sendmmsg */
#ifndef __NR_utimensat
# if defined(__x86_64__)
# define __NR_utimensat 280
# elif defined(__i386__)
# define __NR_utimensat 320
# elif defined(__arm__)
# define __NR_utimensat (UV_SYSCALL_BASE + 348)
# endif
#endif /* __NR_utimensat */
int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) {
#if defined(__i386__)
unsigned long args[4];
int r;
args[0] = (unsigned long) fd;
args[1] = (unsigned long) addr;
args[2] = (unsigned long) addrlen;
args[3] = (unsigned long) flags;
r = syscall(__NR_socketcall, 18 /* SYS_ACCEPT4 */, args);
/* socketcall() raises EINVAL when SYS_ACCEPT4 is not supported but so does
* a bad flags argument. Try to distinguish between the two cases.
*/
if (r == -1)
if (errno == EINVAL)
if ((flags & ~(UV__SOCK_CLOEXEC|UV__SOCK_NONBLOCK)) == 0)
errno = ENOSYS;
return r;
#elif defined(__NR_accept4)
return syscall(__NR_accept4, fd, addr, addrlen, flags);
#else
return errno = ENOSYS, -1;
#endif
}
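
uv__accept4() above deliberately reports ENOSYS when the syscall (or the socketcall sub-call) is unavailable, so that a caller can fall back to plain accept(). One plausible caller-side fallback is sketched below with standard POSIX calls only; it is not the fallback used elsewhere in this tree, and the helper name is made up.

#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Re-create what accept4(..., SOCK_CLOEXEC | SOCK_NONBLOCK) would have done
 * when only accept() is available. Illustrative helper, not libuv code. */
int accept_cloexec_nonblock_fallback(int listen_fd) {
  int fd;
  int flags;
  int saved_errno;

  do
    fd = accept(listen_fd, NULL, NULL);
  while (fd == -1 && errno == EINTR);

  if (fd == -1)
    return -1;

  flags = fcntl(fd, F_GETFL);
  if (flags == -1 ||
      fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1 ||
      fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
    saved_errno = errno;
    close(fd);
    errno = saved_errno;
    return -1;
  }

  return fd;
}
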
int uv__eventfd(unsigned int count) {
#if defined(__NR_eventfd)
return syscall(__NR_eventfd, count);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__eventfd2(unsigned int count, int flags) {
#if defined(__NR_eventfd2)
return syscall(__NR_eventfd2, count, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_create(int size) {
#if defined(__NR_epoll_create)
return syscall(__NR_epoll_create, size);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_create1(int flags) {
#if defined(__NR_epoll_create1)
return syscall(__NR_epoll_create1, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event* events) {
#if defined(__NR_epoll_ctl)
return syscall(__NR_epoll_ctl, epfd, op, fd, events);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_wait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout) {
#if defined(__NR_epoll_wait)
return syscall(__NR_epoll_wait, epfd, events, nevents, timeout);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_pwait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout,
const sigset_t* sigmask) {
#if defined(__NR_epoll_pwait)
return syscall(__NR_epoll_pwait,
epfd,
events,
nevents,
timeout,
sigmask,
sizeof(*sigmask));
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_init(void) {
#if defined(__NR_inotify_init)
return syscall(__NR_inotify_init);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_init1(int flags) {
#if defined(__NR_inotify_init1)
return syscall(__NR_inotify_init1, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_add_watch(int fd, const char* path, uint32_t mask) {
#if defined(__NR_inotify_add_watch)
return syscall(__NR_inotify_add_watch, fd, path, mask);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_rm_watch(int fd, int32_t wd) {
#if defined(__NR_inotify_rm_watch)
return syscall(__NR_inotify_rm_watch, fd, wd);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__pipe2(int pipefd[2], int flags) {
#if defined(__NR_pipe2)
return syscall(__NR_pipe2, pipefd, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags) {
#if defined(__NR_sendmmsg)
return syscall(__NR_sendmmsg, fd, mmsg, vlen, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout) {
#if defined(__NR_recvmmsg)
return syscall(__NR_recvmmsg, fd, mmsg, vlen, flags, timeout);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__utimesat(int dirfd,
const char* path,
const struct timespec times[2],
int flags)
{
#if defined(__NR_utimensat)
return syscall(__NR_utimensat, dirfd, path, times, flags);
#else
return errno = ENOSYS, -1;
#endif
}

View File

@ -1,150 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_
#undef _GNU_SOURCE
#define _GNU_SOURCE
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#if defined(__alpha__)
# define UV__O_CLOEXEC 0x200000
#elif defined(__hppa__)
# define UV__O_CLOEXEC 0x200000
#elif defined(__sparc__)
# define UV__O_CLOEXEC 0x400000
#else
# define UV__O_CLOEXEC 0x80000
#endif
#if defined(__alpha__)
# define UV__O_NONBLOCK 0x4
#elif defined(__hppa__)
# define UV__O_NONBLOCK 0x10004
#elif defined(__mips__)
# define UV__O_NONBLOCK 0x80
#elif defined(__sparc__)
# define UV__O_NONBLOCK 0x4000
#else
# define UV__O_NONBLOCK 0x800
#endif
#define UV__EFD_CLOEXEC UV__O_CLOEXEC
#define UV__EFD_NONBLOCK UV__O_NONBLOCK
#define UV__IN_CLOEXEC UV__O_CLOEXEC
#define UV__IN_NONBLOCK UV__O_NONBLOCK
#define UV__SOCK_CLOEXEC UV__O_CLOEXEC
#define UV__SOCK_NONBLOCK UV__O_NONBLOCK
/* epoll flags */
#define UV__EPOLL_CLOEXEC UV__O_CLOEXEC
#define UV__EPOLL_CTL_ADD 1
#define UV__EPOLL_CTL_DEL 2
#define UV__EPOLL_CTL_MOD 3
#define UV__EPOLLIN 1
#define UV__EPOLLOUT 4
#define UV__EPOLLERR 8
#define UV__EPOLLHUP 16
#define UV__EPOLLONESHOT 0x40000000
#define UV__EPOLLET 0x80000000
/* inotify flags */
#define UV__IN_ACCESS 0x001
#define UV__IN_MODIFY 0x002
#define UV__IN_ATTRIB 0x004
#define UV__IN_CLOSE_WRITE 0x008
#define UV__IN_CLOSE_NOWRITE 0x010
#define UV__IN_OPEN 0x020
#define UV__IN_MOVED_FROM 0x040
#define UV__IN_MOVED_TO 0x080
#define UV__IN_CREATE 0x100
#define UV__IN_DELETE 0x200
#define UV__IN_DELETE_SELF 0x400
#define UV__IN_MOVE_SELF 0x800
#if defined(__x86_64__)
struct uv__epoll_event {
uint32_t events;
uint64_t data;
} __attribute__((packed));
#else
struct uv__epoll_event {
uint32_t events;
uint64_t data;
};
#endif
struct uv__inotify_event {
int32_t wd;
uint32_t mask;
uint32_t cookie;
uint32_t len;
/* char name[0]; */
};
struct uv__mmsghdr {
struct msghdr msg_hdr;
unsigned int msg_len;
};
int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags);
int uv__eventfd(unsigned int count);
int uv__epoll_create(int size);
int uv__epoll_create1(int flags);
int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event *ev);
int uv__epoll_wait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout);
int uv__epoll_pwait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout,
const sigset_t* sigmask);
int uv__eventfd2(unsigned int count, int flags);
int uv__inotify_init(void);
int uv__inotify_init1(int flags);
int uv__inotify_add_watch(int fd, const char* path, uint32_t mask);
int uv__inotify_rm_watch(int fd, int32_t wd);
int uv__pipe2(int pipefd[2], int flags);
int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout);
int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags);
int uv__utimesat(int dirfd,
const char* path,
const struct timespec times[2],
int flags);
#endif /* UV_LINUX_SYSCALL_H_ */

View File

@ -1,64 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#define UV_LOOP_WATCHER_DEFINE(name, type) \
int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \
uv__handle_init(loop, (uv_handle_t*)handle, UV_##type); \
handle->name##_cb = NULL; \
return 0; \
} \
\
int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
if (uv__is_active(handle)) return 0; \
if (cb == NULL) \
return uv__set_artificial_error(handle->loop, UV_EINVAL); \
ngx_queue_insert_head(&handle->loop->name##_handles, &handle->queue); \
handle->name##_cb = cb; \
uv__handle_start(handle); \
return 0; \
} \
\
int uv_##name##_stop(uv_##name##_t* handle) { \
if (!uv__is_active(handle)) return 0; \
ngx_queue_remove(&handle->queue); \
uv__handle_stop(handle); \
return 0; \
} \
\
void uv__run_##name(uv_loop_t* loop) { \
uv_##name##_t* h; \
ngx_queue_t* q; \
ngx_queue_foreach(q, &loop->name##_handles) { \
h = ngx_queue_data(q, uv_##name##_t, queue); \
h->name##_cb(h, 0); \
} \
} \
\
void uv__##name##_close(uv_##name##_t* handle) { \
uv_##name##_stop(handle); \
}
UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
UV_LOOP_WATCHER_DEFINE(check, CHECK)
UV_LOOP_WATCHER_DEFINE(idle, IDLE)
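
The macro above stamps out identical init/start/stop/run/close functions for the prepare, check and idle handle types. A minimal usage sketch against the generated idle API; the callback shape follows the h->name##_cb(h, 0) call in uv__run_*, while the tick counter and the two-argument uv_run() of this libuv generation are assumptions.

#include <stdio.h>
#include "uv.h"

static int ticks;

/* Matches the generated uv_idle_cb: the handle plus a status that is 0. */
static void on_idle(uv_idle_t* handle, int status) {
  (void) status;
  printf("idle tick %d\n", ++ticks);
  if (ticks == 3)
    uv_idle_stop(handle);  /* generated by UV_LOOP_WATCHER_DEFINE(idle, IDLE) */
}

int main(void) {
  uv_idle_t idler;

  uv_idle_init(uv_default_loop(), &idler);
  uv_idle_start(&idler, on_idle);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
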

View File

@ -1,114 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "tree.h"
#include "internal.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
int uv__loop_init(uv_loop_t* loop, int default_loop) {
unsigned int i;
uv__signal_global_once_init();
memset(loop, 0, sizeof(*loop));
RB_INIT(&loop->timer_handles);
ngx_queue_init(&loop->wq);
ngx_queue_init(&loop->active_reqs);
ngx_queue_init(&loop->idle_handles);
ngx_queue_init(&loop->async_handles);
ngx_queue_init(&loop->check_handles);
ngx_queue_init(&loop->prepare_handles);
ngx_queue_init(&loop->handle_queue);
loop->nfds = 0;
loop->watchers = NULL;
loop->nwatchers = 0;
ngx_queue_init(&loop->pending_queue);
ngx_queue_init(&loop->watcher_queue);
loop->closing_handles = NULL;
loop->time = uv__hrtime() / 1000000;
uv__async_init(&loop->async_watcher);
loop->signal_pipefd[0] = -1;
loop->signal_pipefd[1] = -1;
loop->backend_fd = -1;
loop->emfile_fd = -1;
loop->timer_counter = 0;
loop->stop_flag = 0;
if (uv__platform_loop_init(loop, default_loop))
return -1;
uv_signal_init(loop, &loop->child_watcher);
uv__handle_unref(&loop->child_watcher);
loop->child_watcher.flags |= UV__HANDLE_INTERNAL;
for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++)
ngx_queue_init(loop->process_handles + i);
if (uv_mutex_init(&loop->wq_mutex))
abort();
if (uv_async_init(loop, &loop->wq_async, uv__work_done))
abort();
uv__handle_unref(&loop->wq_async);
loop->wq_async.flags |= UV__HANDLE_INTERNAL;
return 0;
}
void uv__loop_delete(uv_loop_t* loop) {
uv__signal_loop_cleanup(loop);
uv__platform_loop_delete(loop);
uv__async_stop(loop, &loop->async_watcher);
if (loop->emfile_fd != -1) {
close(loop->emfile_fd);
loop->emfile_fd = -1;
}
if (loop->backend_fd != -1) {
close(loop->backend_fd);
loop->backend_fd = -1;
}
uv_mutex_lock(&loop->wq_mutex);
assert(ngx_queue_empty(&loop->wq) && "thread pool work queue not empty!");
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
#if 0
assert(ngx_queue_empty(&loop->pending_queue));
assert(ngx_queue_empty(&loop->watcher_queue));
assert(loop->nfds == 0);
#endif
free(loop->watchers);
loop->watchers = NULL;
loop->nwatchers = 0;
}

View File

@ -1,353 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <kvm.h>
#include <paths.h>
#include <ifaddrs.h>
#include <unistd.h>
#include <time.h>
#include <stdlib.h>
#include <fcntl.h>
#include <net/if.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <unistd.h>
#include <time.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
static char *process_title;
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return uv__kqueue_init(loop);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) == -1) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
int uv_exepath(char* buffer, size_t* size) {
int mib[4];
size_t cb;
pid_t mypid;
if (!buffer || !size) {
return -1;
}
mypid = getpid();
mib[0] = CTL_KERN;
mib[1] = KERN_PROC_ARGS;
mib[2] = mypid;
mib[3] = KERN_PROC_ARGV;
cb = *size;
if (sysctl(mib, 4, buffer, &cb, NULL, 0) == -1) {
*size = 0;
return -1;
}
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
struct uvmexp info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, 2, &info, &size, NULL, 0) == -1) {
return -1;
}
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
#if defined(HW_PHYSMEM64)
uint64_t info;
int which[] = {CTL_HW, HW_PHYSMEM64};
#else
unsigned int info;
int which[] = {CTL_HW, HW_PHYSMEM};
#endif
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0) == -1) {
return -1;
}
return (uint64_t) info;
}
char** uv_setup_args(int argc, char** argv) {
process_title = argc ? strdup(argv[0]) : NULL;
return argv;
}
uv_err_t uv_set_process_title(const char* title) {
if (process_title) free(process_title);
process_title = strdup(title);
setproctitle("%s", title);
return uv_ok_;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
if (process_title) {
strncpy(buffer, process_title, size);
} else {
if (size > 0) {
buffer[0] = '\0';
}
}
return uv_ok_;
}
uv_err_t uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc2 *kinfo = NULL;
pid_t pid;
int nprocs;
int max_size = sizeof(struct kinfo_proc2);
int page_size;
page_size = getpagesize();
pid = getpid();
kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
if (kd == NULL) goto error;
kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs);
if (kinfo == NULL) goto error;
*rss = kinfo->p_vm_rssize * page_size;
kvm_close(kd);
return uv_ok_;
error:
if (kd) kvm_close(kd);
return uv__new_sys_error(errno);
}
uv_err_t uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0) == -1) {
return uv__new_sys_error(errno);
}
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
unsigned int multiplier = ((uint64_t)1000L / ticks);
unsigned int cur = 0;
uv_cpu_info_t* cpu_info;
u_int64_t* cp_times;
char model[512];
u_int64_t cpuspeed;
int numcpus;
size_t size;
int i;
size = sizeof(model);
if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) == -1 &&
sysctlbyname("hw.model", &model, &size, NULL, 0) == -1) {
return uv__new_sys_error(errno);
}
size = sizeof(numcpus);
if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0) == -1) {
return uv__new_sys_error(errno);
}
*count = numcpus;
/* Only i386 and amd64 have machdep.tsc_freq */
size = sizeof(cpuspeed);
if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0) == -1) {
cpuspeed = 0;
}
size = numcpus * CPUSTATES * sizeof(*cp_times);
cp_times = malloc(size);
if (cp_times == NULL) {
return uv__new_artificial_error(UV_ENOMEM);
}
if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0) == -1) {
return uv__new_sys_error(errno);
}
*cpu_infos = malloc(numcpus * sizeof(**cpu_infos));
if (!(*cpu_infos)) {
free(cp_times);
free(*cpu_infos);
return uv__new_artificial_error(UV_ENOMEM);
}
for (i = 0; i < numcpus; i++) {
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
cpu_info->model = strdup(model);
cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);
cur += CPUSTATES;
}
free(cp_times);
return uv_ok_;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifaddrs *addrs;
struct ifaddrs *ent;
uv_interface_address_t* address;
if (getifaddrs(&addrs) != 0) {
return uv__new_sys_error(errno);
}
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != PF_INET)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses)) {
return uv__new_artificial_error(UV_ENOMEM);
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING)) {
continue;
}
if (ent->ifa_addr == NULL) {
continue;
}
if (ent->ifa_addr->sa_family != PF_INET) {
continue;
}
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6 *)ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in *)ent->ifa_addr);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK) ? 1 : 0;
address++;
}
freeifaddrs(addrs);
return uv_ok_;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}

View File

@ -1,304 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <fcntl.h>
#include <kvm.h>
#include <paths.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
static char *process_title;
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return uv__kqueue_init(loop);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(void) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
int uv_exepath(char* buffer, size_t* size) {
int mib[4];
char **argsbuf = NULL;
char **argsbuf_tmp;
size_t argsbuf_size = 100U;
size_t exepath_size;
pid_t mypid;
int status = -1;
if (!buffer || !size) {
goto out;
}
mypid = getpid();
for (;;) {
if ((argsbuf_tmp = realloc(argsbuf, argsbuf_size)) == NULL) {
goto out;
}
argsbuf = argsbuf_tmp;
mib[0] = CTL_KERN;
mib[1] = KERN_PROC_ARGS;
mib[2] = mypid;
mib[3] = KERN_PROC_ARGV;
if (sysctl(mib, 4, argsbuf, &argsbuf_size, NULL, 0) == 0) {
break;
}
if (errno != ENOMEM) {
goto out;
}
argsbuf_size *= 2U;
}
if (argsbuf[0] == NULL) {
goto out;
}
exepath_size = strlen(argsbuf[0]);
if (exepath_size >= *size) {
goto out;
}
memcpy(buffer, argsbuf[0], exepath_size + 1U);
*size = exepath_size;
status = 0;
out:
free(argsbuf);
return status;
}
uint64_t uv_get_free_memory(void) {
struct uvmexp info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return -1;
}
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
uint64_t info;
int which[] = {CTL_HW, HW_PHYSMEM64};
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return -1;
}
return (uint64_t) info;
}
char** uv_setup_args(int argc, char** argv) {
process_title = argc ? strdup(argv[0]) : NULL;
return argv;
}
uv_err_t uv_set_process_title(const char* title) {
if (process_title) free(process_title);
process_title = strdup(title);
  setproctitle("%s", title);
return uv_ok_;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
if (process_title) {
strncpy(buffer, process_title, size);
} else {
if (size > 0) {
buffer[0] = '\0';
}
}
return uv_ok_;
}
uv_err_t uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc *kinfo = NULL;
pid_t pid;
int nprocs, max_size = sizeof(struct kinfo_proc);
size_t page_size = getpagesize();
pid = getpid();
kd = kvm_open(NULL, _PATH_MEM, NULL, O_RDONLY, "kvm_open");
if (kd == NULL) goto error;
kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, max_size, &nprocs);
if (kinfo == NULL) goto error;
*rss = kinfo->p_vm_rssize * page_size;
kvm_close(kd);
return uv_ok_;
error:
if (kd) kvm_close(kd);
return uv__new_sys_error(errno);
}
uv_err_t uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks), cpuspeed;
uint64_t info[CPUSTATES];
char model[512];
int numcpus = 1;
int which[] = {CTL_HW,HW_MODEL,0};
size_t size;
int i;
uv_cpu_info_t* cpu_info;
size = sizeof(model);
if (sysctl(which, 2, &model, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
which[1] = HW_NCPU;
size = sizeof(numcpus);
if (sysctl(which, 2, &numcpus, &size, NULL, 0) < 0) {
return uv__new_sys_error(errno);
}
*cpu_infos = (uv_cpu_info_t*)malloc(numcpus * sizeof(uv_cpu_info_t));
if (!(*cpu_infos)) {
return uv__new_artificial_error(UV_ENOMEM);
}
*count = numcpus;
which[1] = HW_CPUSPEED;
size = sizeof(cpuspeed);
if (sysctl(which, 2, &cpuspeed, &size, NULL, 0) < 0) {
free(*cpu_infos);
return uv__new_sys_error(errno);
}
size = sizeof(info);
which[0] = CTL_KERN;
which[1] = KERN_CPTIME2;
for (i = 0; i < numcpus; i++) {
which[2] = i;
size = sizeof(info);
if (sysctl(which, 3, &info, &size, NULL, 0) < 0) {
free(*cpu_infos);
return uv__new_sys_error(errno);
}
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
cpu_info->model = strdup(model);
cpu_info->speed = cpuspeed;
}
return uv_ok_;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
/* TODO: implement */
*addresses = NULL;
*count = 0;
return uv_ok_;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
}

View File

@ -1,261 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdlib.h>
static void uv__pipe_accept(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
handle->shutdown_req = NULL;
handle->connect_req = NULL;
handle->pipe_fname = NULL;
handle->ipc = ipc;
return 0;
}
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
struct sockaddr_un saddr;
const char* pipe_fname;
int saved_errno;
int sockfd;
int status;
int bound;
saved_errno = errno;
pipe_fname = NULL;
sockfd = -1;
status = -1;
bound = 0;
/* Already bound? */
if (uv__stream_fd(handle) >= 0) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
goto out;
}
/* Make a copy of the file name, it outlives this function's scope. */
if ((pipe_fname = strdup(name)) == NULL) {
uv__set_sys_error(handle->loop, ENOMEM);
goto out;
}
/* We've got a copy, don't touch the original any more. */
name = NULL;
if ((sockfd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
uv__set_sys_error(handle->loop, errno);
goto out;
}
memset(&saddr, 0, sizeof saddr);
uv_strlcpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
saddr.sun_family = AF_UNIX;
if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
/* Convert ENOENT to EACCES for compatibility with Windows. */
uv__set_sys_error(handle->loop, (errno == ENOENT) ? EACCES : errno);
goto out;
}
bound = 1;
/* Success. */
handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
handle->io_watcher.fd = sockfd;
status = 0;
out:
/* Clean up on error. */
if (status) {
if (bound) {
/* unlink() before close() to avoid races. */
assert(pipe_fname != NULL);
unlink(pipe_fname);
}
close(sockfd);
free((void*)pipe_fname);
}
errno = saved_errno;
return status;
}
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
int saved_errno;
int status;
saved_errno = errno;
status = -1;
if (uv__stream_fd(handle) == -1) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
goto out;
}
assert(uv__stream_fd(handle) >= 0);
if ((status = listen(uv__stream_fd(handle), backlog)) == -1) {
uv__set_sys_error(handle->loop, errno);
} else {
handle->connection_cb = cb;
handle->io_watcher.cb = uv__pipe_accept;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN);
}
out:
errno = saved_errno;
return status;
}
void uv__pipe_close(uv_pipe_t* handle) {
if (handle->pipe_fname) {
/*
* Unlink the file system entity before closing the file descriptor.
* Doing it the other way around introduces a race where our process
* unlinks a socket with the same name that's just been created by
* another thread or process.
*/
unlink(handle->pipe_fname);
free((void*)handle->pipe_fname);
handle->pipe_fname = NULL;
}
uv__stream_close((uv_stream_t*)handle);
}
int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
#if defined(__APPLE__)
if (uv__stream_try_select((uv_stream_t*) handle, &fd))
return -1;
#endif /* defined(__APPLE__) */
return uv__stream_open((uv_stream_t*)handle,
fd,
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
}
void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
struct sockaddr_un saddr;
int saved_errno;
int new_sock;
int err;
int r;
saved_errno = errno;
new_sock = (uv__stream_fd(handle) == -1);
err = -1;
if (new_sock)
if ((handle->io_watcher.fd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1)
goto out;
memset(&saddr, 0, sizeof saddr);
uv_strlcpy(saddr.sun_path, name, sizeof(saddr.sun_path));
saddr.sun_family = AF_UNIX;
do {
r = connect(uv__stream_fd(handle),
(struct sockaddr*)&saddr, sizeof saddr);
}
while (r == -1 && errno == EINTR);
if (r == -1)
if (errno != EINPROGRESS)
goto out;
if (new_sock)
if (uv__stream_open((uv_stream_t*)handle,
uv__stream_fd(handle),
UV_STREAM_READABLE | UV_STREAM_WRITABLE))
goto out;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN | UV__POLLOUT);
err = 0;
out:
handle->delayed_error = err ? errno : 0; /* Passed to callback. */
handle->connect_req = req;
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*)handle;
req->cb = cb;
ngx_queue_init(&req->queue);
/* Force callback to run on next tick in case of error. */
if (err != 0)
uv__io_feed(handle->loop, &handle->io_watcher);
/* Mimic the Windows pipe implementation, always
* return 0 and let the callback handle errors.
*/
errno = saved_errno;
}
/* TODO merge with uv__server_io()? */
static void uv__pipe_accept(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_pipe_t* pipe;
int saved_errno;
int sockfd;
saved_errno = errno;
pipe = container_of(w, uv_pipe_t, io_watcher);
assert(pipe->type == UV_NAMED_PIPE);
sockfd = uv__accept(uv__stream_fd(pipe));
if (sockfd == -1) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
uv__set_sys_error(pipe->loop, errno);
pipe->connection_cb((uv_stream_t*)pipe, -1);
}
} else {
pipe->accepted_fd = sockfd;
pipe->connection_cb((uv_stream_t*)pipe, 0);
if (pipe->accepted_fd == sockfd) {
/* The user hasn't called uv_accept() yet */
uv__io_stop(pipe->loop, &pipe->io_watcher, UV__POLLIN);
}
}
errno = saved_errno;
}
void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
}
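
For reference, a minimal, hypothetical usage sketch of the pipe API removed above (not part of the deleted file). The socket path and callback are illustrative, and uv_listen()/uv_connection_cb are assumed to have the pre-1.0 signatures this code targets:

#include "uv.h"

/* Hypothetical connection callback; the (server, status) shape matches the
 * connection_cb calls made by uv__pipe_accept() above. */
static void on_connection(uv_stream_t* server, int status) {
  if (status == 0) {
    /* A client is waiting; uv_accept() would be called here. */
  }
}

static void pipe_server_sketch(uv_loop_t* loop) {
  static uv_pipe_t server;                    /* must outlive the loop run */
  uv_pipe_init(loop, &server, 0);             /* ipc == 0: plain byte pipe */
  uv_pipe_bind(&server, "/tmp/example.sock"); /* illustrative path */
  uv_listen((uv_stream_t*) &server, 128, on_connection);
}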

View File

@ -1,108 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <unistd.h>
#include <assert.h>
#include <errno.h>
static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_poll_t* handle;
int pevents;
handle = container_of(w, uv_poll_t, io_watcher);
if (events & UV__POLLERR) {
uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
uv__handle_stop(handle);
uv__set_sys_error(handle->loop, EBADF);
handle->poll_cb(handle, -1, 0);
return;
}
pevents = 0;
if (events & UV__POLLIN)
pevents |= UV_READABLE;
if (events & UV__POLLOUT)
pevents |= UV_WRITABLE;
handle->poll_cb(handle, 0, pevents);
}
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
uv__io_init(&handle->io_watcher, uv__poll_io, fd);
handle->poll_cb = NULL;
return 0;
}
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
uv_os_sock_t socket) {
return uv_poll_init(loop, handle, socket);
}
static void uv__poll_stop(uv_poll_t* handle) {
uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLIN | UV__POLLOUT);
uv__handle_stop(handle);
}
int uv_poll_stop(uv_poll_t* handle) {
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
uv__poll_stop(handle);
return 0;
}
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
int events;
assert((pevents & ~(UV_READABLE | UV_WRITABLE)) == 0);
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
uv__poll_stop(handle);
if (pevents == 0)
return 0;
events = 0;
if (pevents & UV_READABLE)
events |= UV__POLLIN;
if (pevents & UV_WRITABLE)
events |= UV__POLLOUT;
uv__io_start(handle->loop, &handle->io_watcher, events);
uv__handle_start(handle);
handle->poll_cb = poll_cb;
return 0;
}
void uv__poll_close(uv_poll_t* handle) {
uv__poll_stop(handle);
}
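
As a rough, hypothetical usage sketch of the poll watcher removed above (not part of the deleted file; fd and callback are illustrative). The status/events values follow the poll_cb dispatch visible in uv__poll_io():

#include "uv.h"

/* Called with status == -1 on error, otherwise with a UV_READABLE /
 * UV_WRITABLE bitmask, exactly as uv__poll_io() above dispatches it. */
static void on_poll(uv_poll_t* handle, int status, int events) {
  if (status == 0 && (events & UV_READABLE)) {
    /* read from the watched descriptor here */
  }
}

static void poll_sketch(uv_loop_t* loop, int fd) {
  static uv_poll_t watcher;
  uv_poll_init(loop, &watcher, fd);
  uv_poll_start(&watcher, UV_READABLE, on_poll);
  /* ... later: uv_poll_stop(&watcher); */
}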

View File

@ -1,519 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# define environ (*_NSGetEnviron())
#else
extern char **environ;
#endif
static ngx_queue_t* uv__process_queue(uv_loop_t* loop, int pid) {
assert(pid > 0);
return loop->process_handles + pid % ARRAY_SIZE(loop->process_handles);
}
static uv_process_t* uv__process_find(uv_loop_t* loop, int pid) {
uv_process_t* handle;
ngx_queue_t* h;
ngx_queue_t* q;
h = uv__process_queue(loop, pid);
ngx_queue_foreach(q, h) {
handle = ngx_queue_data(q, uv_process_t, queue);
if (handle->pid == pid) return handle;
}
return NULL;
}
static void uv__chld(uv_signal_t* handle, int signum) {
uv_process_t* process;
int exit_status;
int term_signal;
int status;
pid_t pid;
assert(signum == SIGCHLD);
for (;;) {
do
pid = waitpid(-1, &status, WNOHANG);
while (pid == -1 && errno == EINTR);
if (pid == 0)
return;
if (pid == -1) {
if (errno == ECHILD)
return; /* XXX stop signal watcher? */
else
abort();
}
process = uv__process_find(handle->loop, pid);
if (process == NULL)
continue; /* XXX bug? abort? */
uv__handle_stop(process);
if (process->exit_cb == NULL)
continue;
exit_status = 0;
term_signal = 0;
if (WIFEXITED(status))
exit_status = WEXITSTATUS(status);
if (WIFSIGNALED(status))
term_signal = WTERMSIG(status);
if (process->errorno) {
uv__set_sys_error(process->loop, process->errorno);
exit_status = -1; /* execve() failed */
}
process->exit_cb(process, exit_status, term_signal);
}
}
int uv__make_socketpair(int fds[2], int flags) {
#if defined(__linux__)
static int no_cloexec;
if (no_cloexec)
goto skip;
if (socketpair(AF_UNIX, SOCK_STREAM | UV__SOCK_CLOEXEC | flags, 0, fds) == 0)
return 0;
/* Retry on EINVAL, it means SOCK_CLOEXEC is not supported.
* Anything else is a genuine error.
*/
if (errno != EINVAL)
return -1;
no_cloexec = 1;
skip:
#endif
if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
return -1;
uv__cloexec(fds[0], 1);
uv__cloexec(fds[1], 1);
if (flags & UV__F_NONBLOCK) {
uv__nonblock(fds[0], 1);
uv__nonblock(fds[1], 1);
}
return 0;
}
int uv__make_pipe(int fds[2], int flags) {
#if defined(__linux__)
static int no_pipe2;
if (no_pipe2)
goto skip;
if (uv__pipe2(fds, flags | UV__O_CLOEXEC) == 0)
return 0;
if (errno != ENOSYS)
return -1;
no_pipe2 = 1;
skip:
#endif
if (pipe(fds))
return -1;
uv__cloexec(fds[0], 1);
uv__cloexec(fds[1], 1);
if (flags & UV__F_NONBLOCK) {
uv__nonblock(fds[0], 1);
uv__nonblock(fds[1], 1);
}
return 0;
}
/*
* Used for initializing stdio streams such as options.stdio[0]. Returns
* zero on success. See also the cleanup section in uv_spawn().
*/
static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
int mask;
int fd;
mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM;
switch (container->flags & mask) {
case UV_IGNORE:
return 0;
case UV_CREATE_PIPE:
assert(container->data.stream != NULL);
if (container->data.stream->type != UV_NAMED_PIPE) {
errno = EINVAL;
return -1;
}
return uv__make_socketpair(fds, 0);
case UV_INHERIT_FD:
case UV_INHERIT_STREAM:
if (container->flags & UV_INHERIT_FD)
fd = container->data.fd;
else
fd = uv__stream_fd(container->data.stream);
if (fd == -1) {
errno = EINVAL;
return -1;
}
fds[1] = fd;
return 0;
default:
assert(0 && "Unexpected flags");
return -1;
}
}
static int uv__process_open_stream(uv_stdio_container_t* container,
int pipefds[2],
int writable) {
int flags;
if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0)
return 0;
if (close(pipefds[1]))
if (errno != EINTR && errno != EINPROGRESS)
abort();
pipefds[1] = -1;
uv__nonblock(pipefds[0], 1);
if (container->data.stream->type == UV_NAMED_PIPE &&
((uv_pipe_t*)container->data.stream)->ipc)
flags = UV_STREAM_READABLE | UV_STREAM_WRITABLE;
else if (writable)
flags = UV_STREAM_WRITABLE;
else
flags = UV_STREAM_READABLE;
return uv__stream_open(container->data.stream, pipefds[0], flags);
}
static void uv__process_close_stream(uv_stdio_container_t* container) {
if (!(container->flags & UV_CREATE_PIPE)) return;
uv__stream_close((uv_stream_t*)container->data.stream);
}
static void uv__write_int(int fd, int val) {
ssize_t n;
do
n = write(fd, &val, sizeof(val));
while (n == -1 && errno == EINTR);
if (n == -1 && errno == EPIPE)
return; /* parent process has quit */
assert(n == sizeof(val));
}
static void uv__process_child_init(uv_process_options_t options,
int stdio_count,
int (*pipes)[2],
int error_fd) {
int close_fd;
int use_fd;
int fd;
if (options.flags & UV_PROCESS_DETACHED)
setsid();
for (fd = 0; fd < stdio_count; fd++) {
close_fd = pipes[fd][0];
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set
*/
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
if (use_fd == -1) {
uv__write_int(error_fd, errno);
perror("failed to open stdio");
_exit(127);
}
}
}
if (fd == use_fd)
uv__cloexec(use_fd, 0);
else
dup2(use_fd, fd);
if (fd <= 2)
uv__nonblock(fd, 0);
if (close_fd != -1)
close(close_fd);
}
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd >= 0 && fd != use_fd)
close(use_fd);
}
if (options.cwd && chdir(options.cwd)) {
uv__write_int(error_fd, errno);
perror("chdir()");
_exit(127);
}
if ((options.flags & UV_PROCESS_SETGID) && setgid(options.gid)) {
uv__write_int(error_fd, errno);
perror("setgid()");
_exit(127);
}
if ((options.flags & UV_PROCESS_SETUID) && setuid(options.uid)) {
uv__write_int(error_fd, errno);
perror("setuid()");
_exit(127);
}
if (options.env) {
environ = options.env;
}
execvp(options.file, options.args);
uv__write_int(error_fd, errno);
perror("execvp()");
_exit(127);
}
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
const uv_process_options_t options) {
int signal_pipe[2] = { -1, -1 };
int (*pipes)[2];
int stdio_count;
ngx_queue_t* q;
ssize_t r;
pid_t pid;
int i;
assert(options.file != NULL);
assert(!(options.flags & ~(UV_PROCESS_DETACHED |
UV_PROCESS_SETGID |
UV_PROCESS_SETUID |
UV_PROCESS_WINDOWS_HIDE |
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
ngx_queue_init(&process->queue);
stdio_count = options.stdio_count;
if (stdio_count < 3)
stdio_count = 3;
pipes = malloc(stdio_count * sizeof(*pipes));
if (pipes == NULL) {
errno = ENOMEM;
goto error;
}
for (i = 0; i < stdio_count; i++) {
pipes[i][0] = -1;
pipes[i][1] = -1;
}
for (i = 0; i < options.stdio_count; i++)
if (uv__process_init_stdio(options.stdio + i, pipes[i]))
goto error;
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
*
* if ((pid = fork()) > 0) {
* kill(pid, SIGTERM);
* }
* else if (pid == 0) {
* execve("/bin/cat", argp, envp);
* }
*
* The parent sends a signal immediately after forking.
* Since the child may not have called `execve()` yet,
* there is no telling what process receives the signal,
* our fork or /bin/cat.
*
* To avoid ambiguity, we create a pipe with both ends
* marked close-on-exec. Then, after the call to `fork()`,
* the parent polls the read end until it EOFs or errors with EPIPE.
*/
if (uv__make_pipe(signal_pipe, 0))
goto error;
uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
pid = fork();
if (pid == -1) {
close(signal_pipe[0]);
close(signal_pipe[1]);
goto error;
}
if (pid == 0) {
uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
abort();
}
close(signal_pipe[1]);
process->errorno = 0;
do
r = read(signal_pipe[0], &process->errorno, sizeof(process->errorno));
while (r == -1 && errno == EINTR);
if (r == 0)
; /* okay, EOF */
else if (r == sizeof(process->errorno))
; /* okay, read errorno */
else if (r == -1 && errno == EPIPE)
; /* okay, got EPIPE */
else
abort();
close(signal_pipe[0]);
for (i = 0; i < options.stdio_count; i++) {
if (uv__process_open_stream(options.stdio + i, pipes[i], i == 0)) {
while (i--) uv__process_close_stream(options.stdio + i);
goto error;
}
}
q = uv__process_queue(loop, pid);
ngx_queue_insert_tail(q, &process->queue);
process->pid = pid;
process->exit_cb = options.exit_cb;
uv__handle_start(process);
free(pipes);
return 0;
error:
uv__set_sys_error(process->loop, errno);
if (pipes != NULL) {
for (i = 0; i < stdio_count; i++) {
if (i < options.stdio_count)
if (options.stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM))
continue;
if (pipes[i][0] != -1)
close(pipes[i][0]);
if (pipes[i][1] != -1)
close(pipes[i][1]);
}
free(pipes);
}
return -1;
}
int uv_process_kill(uv_process_t* process, int signum) {
int r = kill(process->pid, signum);
if (r) {
uv__set_sys_error(process->loop, errno);
return -1;
} else {
return 0;
}
}
uv_err_t uv_kill(int pid, int signum) {
int r = kill(pid, signum);
if (r) {
return uv__new_sys_error(errno);
} else {
return uv_ok_;
}
}
void uv__process_close(uv_process_t* handle) {
/* TODO stop signal watcher when this is the last handle */
ngx_queue_remove(&handle->queue);
uv__handle_stop(handle);
}
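
For orientation, a minimal, hypothetical uv_spawn() sketch against the options-by-value signature this file implements (not part of the deleted file; the command, arguments and callback are illustrative only):

#include "uv.h"
#include <string.h>

/* Matches the exit_cb invocation in uv__chld() above:
 * (process, exit_status, term_signal). */
static void on_exit(uv_process_t* proc, int exit_status, int term_signal) {
  /* exit_status is -1 if execve() itself failed. */
}

static void spawn_sketch(uv_loop_t* loop) {
  static uv_process_t child;
  static char* args[] = { (char*) "/bin/ls", (char*) "-l", NULL };
  uv_process_options_t options;

  memset(&options, 0, sizeof(options));
  options.file = "/bin/ls";
  options.args = args;
  options.exit_cb = on_exit;
  /* stdio_count == 0: stdin/stdout/stderr are redirected to /dev/null
   * by uv__process_child_init() above. */
  uv_spawn(loop, &child, options);
}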

View File

@ -1,103 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>  /* uv_setup_args() below asserts that the argv strings are adjacent */
extern void uv__set_process_title(const char* title);
static void* args_mem;
static struct {
char* str;
size_t len;
} process_title;
char** uv_setup_args(int argc, char** argv) {
char** new_argv;
size_t size;
char* s;
int i;
if (argc <= 0)
return argv;
/* Calculate how much memory we need for the argv strings. */
size = 0;
for (i = 0; i < argc; i++)
size += strlen(argv[i]) + 1;
process_title.str = argv[0];
process_title.len = argv[argc - 1] + strlen(argv[argc - 1]) - argv[0];
assert(process_title.len + 1 == size); /* argv memory should be adjacent. */
/* Add space for the argv pointers. */
size += (argc + 1) * sizeof(char*);
new_argv = malloc(size);
if (new_argv == NULL)
return argv;
args_mem = new_argv;
/* Copy over the strings and set up the pointer table. */
s = (char*) &new_argv[argc + 1];
for (i = 0; i < argc; i++) {
size = strlen(argv[i]) + 1;
memcpy(s, argv[i], size);
new_argv[i] = s;
s += size;
}
new_argv[i] = NULL;
return new_argv;
}
uv_err_t uv_set_process_title(const char* title) {
if (process_title.len == 0)
return uv_ok_;
/* No need to terminate, byte after is always '\0'. */
strncpy(process_title.str, title, process_title.len);
uv__set_process_title(title);
return uv_ok_;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
if (process_title.len > 0)
strncpy(buffer, process_title.str, size);
else if (size > 0)
buffer[0] = '\0';
return uv_ok_;
}
__attribute__((destructor))
static void free_args_mem(void) {
free(args_mem); /* Keep valgrind happy. */
args_mem = NULL;
}
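
A short, hypothetical sketch of the intended call order for the two entry points above (not part of the deleted file): uv_setup_args() records the original argv region and hands back a private copy, so the title can later be written over the original strings without clobbering anything the program still uses.

#include "uv.h"

int main(int argc, char** argv) {
  /* Record the original argv region and switch to a private copy... */
  argv = uv_setup_args(argc, argv);

  /* ...so the title can be written over the original argv strings
   * (truncated to their combined length by the strncpy() above). */
  uv_set_process_title("example-title");
  return 0;
}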

View File

@ -1,458 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
typedef struct {
uv_signal_t* handle;
int signum;
} uv__signal_msg_t;
RB_HEAD(uv__signal_tree_s, uv_signal_s);
static int uv__signal_unlock(void);
static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events);
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2);
static void uv__signal_stop(uv_signal_t* handle);
static pthread_once_t uv__signal_global_init_guard = PTHREAD_ONCE_INIT;
static struct uv__signal_tree_s uv__signal_tree =
RB_INITIALIZER(uv__signal_tree);
static int uv__signal_lock_pipefd[2];
RB_GENERATE_STATIC(uv__signal_tree_s,
uv_signal_s, tree_entry,
uv__signal_compare)
static void uv__signal_global_init(void) {
if (uv__make_pipe(uv__signal_lock_pipefd, 0))
abort();
if (uv__signal_unlock())
abort();
}
void uv__signal_global_once_init(void) {
pthread_once(&uv__signal_global_init_guard, uv__signal_global_init);
}
static int uv__signal_lock(void) {
int r;
char data;
do {
r = read(uv__signal_lock_pipefd[0], &data, sizeof data);
} while (r < 0 && errno == EINTR);
return (r < 0) ? -1 : 0;
}
static int uv__signal_unlock(void) {
int r;
char data = 42;
do {
r = write(uv__signal_lock_pipefd[1], &data, sizeof data);
} while (r < 0 && errno == EINTR);
return (r < 0) ? -1 : 0;
}
static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
sigset_t new_mask;
if (sigfillset(&new_mask))
abort();
if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
abort();
if (uv__signal_lock())
abort();
}
static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
if (uv__signal_unlock())
abort();
if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL))
abort();
}
inline static uv_signal_t* uv__signal_first_handle(int signum) {
/* This function must be called with the signal lock held. */
uv_signal_t lookup;
uv_signal_t* handle;
lookup.signum = signum;
lookup.loop = NULL;
handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup);
if (handle != NULL && handle->signum == signum)
return handle;
return NULL;
}
static void uv__signal_handler(int signum) {
uv__signal_msg_t msg;
uv_signal_t* handle;
int saved_errno;
saved_errno = errno;
memset(&msg, 0, sizeof msg);
if (uv__signal_lock()) {
errno = saved_errno;
return;
}
for (handle = uv__signal_first_handle(signum);
handle != NULL && handle->signum == signum;
handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) {
int r;
msg.signum = signum;
msg.handle = handle;
/* write() should be atomic for small data chunks, so the entire message
* should be written at once. In theory the pipe could become full, in
* which case the user is out of luck.
*/
do {
r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg);
} while (r == -1 && errno == EINTR);
assert(r == sizeof msg ||
(r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
if (r != -1)
handle->caught_signals++;
}
uv__signal_unlock();
errno = saved_errno;
}
static uv_err_t uv__signal_register_handler(int signum) {
/* When this function is called, the signal lock must be held. */
struct sigaction sa;
/* XXX use a separate signal stack? */
memset(&sa, 0, sizeof(sa));
if (sigfillset(&sa.sa_mask))
abort();
sa.sa_handler = uv__signal_handler;
/* XXX save old action so we can restore it later on? */
if (sigaction(signum, &sa, NULL))
return uv__new_sys_error(errno);
return uv_ok_;
}
static void uv__signal_unregister_handler(int signum) {
/* When this function is called, the signal lock must be held. */
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
/* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
* signal implies that it was successfully registered earlier, so EINVAL
* should never happen.
*/
if (sigaction(signum, &sa, NULL))
abort();
}
static int uv__signal_loop_once_init(uv_loop_t* loop) {
/* Return if already initialized. */
if (loop->signal_pipefd[0] != -1)
return 0;
if (uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK))
return -1;
uv__io_init(&loop->signal_io_watcher,
uv__signal_event,
loop->signal_pipefd[0]);
uv__io_start(loop, &loop->signal_io_watcher, UV__POLLIN);
return 0;
}
void uv__signal_loop_cleanup(uv_loop_t* loop) {
ngx_queue_t* q;
/* Stop all the signal watchers that are still attached to this loop. This
* ensures that the (shared) signal tree doesn't contain any invalid
* entries, and that signal handlers are removed when appropriate.
*/
ngx_queue_foreach(q, &loop->handle_queue) {
uv_handle_t* handle = ngx_queue_data(q, uv_handle_t, handle_queue);
if (handle->type == UV_SIGNAL)
uv__signal_stop((uv_signal_t*) handle);
}
if (loop->signal_pipefd[0] != -1) {
close(loop->signal_pipefd[0]);
loop->signal_pipefd[0] = -1;
}
if (loop->signal_pipefd[1] != -1) {
close(loop->signal_pipefd[1]);
loop->signal_pipefd[1] = -1;
}
}
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
if (uv__signal_loop_once_init(loop))
return uv__set_sys_error(loop, errno);
uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
handle->signum = 0;
handle->caught_signals = 0;
handle->dispatched_signals = 0;
return 0;
}
void uv__signal_close(uv_signal_t* handle) {
uv__signal_stop(handle);
/* If there are any caught signals "trapped" in the signal pipe, we can't
* call the close callback yet. Otherwise, add the handle to the finish_close
* queue.
*/
if (handle->caught_signals == handle->dispatched_signals) {
uv__make_close_pending((uv_handle_t*) handle);
}
}
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
sigset_t saved_sigmask;
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
/* If the user supplies signum == 0, then return an error right away. If the
* signum is otherwise invalid then uv__signal_register will find out
* eventually.
*/
if (signum == 0) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
return -1;
}
/* Short circuit: if the signal watcher is already watching {signum} don't
* go through the process of deregistering and registering the handler.
* Additionally, this avoids pending signals getting lost in the small
* time frame in which handle->signum == 0.
*/
if (signum == handle->signum) {
handle->signal_cb = signal_cb;
return 0;
}
/* If the signal handler was already active, stop it first. */
if (handle->signum != 0) {
uv__signal_stop(handle);
}
uv__signal_block_and_lock(&saved_sigmask);
/* If at this point there are no active signal watchers for this signum (in
* any of the loops), it's time to try and register a handler for it here.
*/
if (uv__signal_first_handle(signum) == NULL) {
uv_err_t err = uv__signal_register_handler(signum);
if (err.code != UV_OK) {
/* Registering the signal handler failed. Must be an invalid signal. */
handle->loop->last_err = err;
uv__signal_unlock_and_unblock(&saved_sigmask);
return -1;
}
}
handle->signum = signum;
RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
uv__signal_unlock_and_unblock(&saved_sigmask);
handle->signal_cb = signal_cb;
uv__handle_start(handle);
return 0;
}
static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv__signal_msg_t* msg;
uv_signal_t* handle;
char buf[sizeof(uv__signal_msg_t) * 32];
size_t bytes, end, i;
int r;
bytes = 0;
do {
r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
if (r == -1 && errno == EINTR)
continue;
if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
/* If there are bytes in the buffer already (which really is extremely
* unlikely, if possible at all) we can't exit the function here. We'll
* spin until more bytes are read instead.
*/
if (bytes > 0)
continue;
/* Otherwise, there was nothing there. */
return;
}
/* Other errors really should never happen. */
if (r == -1)
abort();
bytes += r;
/* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
msg = (uv__signal_msg_t*) (buf + i);
handle = msg->handle;
if (msg->signum == handle->signum) {
assert(!(handle->flags & UV_CLOSING));
handle->signal_cb(handle, handle->signum);
}
handle->dispatched_signals++;
/* If uv_close was called while there were caught signals that were not
* yet dispatched, the call to uv__finish_close() was deferred. Make close
* pending now if this has happened.
*/
if ((handle->flags & UV_CLOSING) &&
(handle->caught_signals == handle->dispatched_signals)) {
uv__make_close_pending((uv_handle_t*) handle);
}
}
bytes -= end;
/* If there are any "partial" messages left, move them to the start of
* the buffer, and spin. This should not happen.
*/
if (bytes) {
memmove(buf, buf + end, bytes);
continue;
}
} while (end == sizeof buf);
}
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
/* Compare signums first so all watchers with the same signum end up
* adjacent.
*/
if (w1->signum < w2->signum) return -1;
if (w1->signum > w2->signum) return 1;
/* Sort by loop pointer, so we can easily look up the first item after
* { .signum = x, .loop = NULL }.
*/
if (w1->loop < w2->loop) return -1;
if (w1->loop > w2->loop) return 1;
if (w1 < w2) return -1;
if (w1 > w2) return 1;
return 0;
}
int uv_signal_stop(uv_signal_t* handle) {
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
uv__signal_stop(handle);
return 0;
}
static void uv__signal_stop(uv_signal_t* handle) {
uv_signal_t* removed_handle;
sigset_t saved_sigmask;
/* If the watcher wasn't started, this is a no-op. */
if (handle->signum == 0)
return;
uv__signal_block_and_lock(&saved_sigmask);
removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle);
assert(removed_handle == handle);
(void) removed_handle;
/* Check if there are other active signal watchers observing this signal. If
* not, unregister the signal handler.
*/
if (uv__signal_first_handle(handle->signum) == NULL)
uv__signal_unregister_handler(handle->signum);
uv__signal_unlock_and_unblock(&saved_sigmask);
handle->signum = 0;
uv__handle_stop(handle);
}
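
For context, a minimal, hypothetical watcher setup for the signal machinery above (not part of the deleted file; the signal number and callback are illustrative). The callback signature matches the signal_cb dispatch in uv__signal_event():

#include "uv.h"
#include <signal.h>

static void on_signal(uv_signal_t* handle, int signum) {
  /* Runs on the loop thread, not inside the async signal handler;
   * the handler only writes a uv__signal_msg_t into the pipe. */
}

static void signal_sketch(uv_loop_t* loop) {
  static uv_signal_t sig;
  uv_signal_init(loop, &sig);
  uv_signal_start(&sig, on_signal, SIGINT);
  /* ... later: uv_signal_stop(&sig); */
}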

File diff suppressed because it is too large

View File

@ -1,680 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifndef SUNOS_NO_IFADDRS
# include <ifaddrs.h>
#endif
#include <net/if.h>
#include <sys/loadavg.h>
#include <sys/time.h>
#include <unistd.h>
#include <kstat.h>
#include <fcntl.h>
#include <sys/port.h>
#include <port.h>
#define PORT_FIRED 0x69
#define PORT_UNUSED 0x0
#define PORT_LOADED 0x99
#define PORT_DELETED -1
#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
#define PROCFS_FILE_OFFSET_BITS_HACK 1
#undef _FILE_OFFSET_BITS
#else
#define PROCFS_FILE_OFFSET_BITS_HACK 0
#endif
#include <procfs.h>
#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
#define _FILE_OFFSET_BITS 64
#endif
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
loop->fs_fd = -1;
loop->backend_fd = port_create();
if (loop->backend_fd == -1)
return -1;
uv__cloexec(loop->backend_fd, 1);
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->fs_fd != -1) {
close(loop->fs_fd);
loop->fs_fd = -1;
}
if (loop->backend_fd != -1) {
close(loop->backend_fd);
loop->backend_fd = -1;
}
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct port_event* events;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct port_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events == NULL)
return;
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].portev_object == fd)
events[i].portev_object = -1;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct port_event events[1024];
struct port_event* pe;
struct timespec spec;
ngx_queue_t* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
unsigned int nfds;
unsigned int i;
int saved_errno;
int nevents;
int count;
int fd;
if (loop->nfds == 0) {
assert(ngx_queue_empty(&loop->watcher_queue));
return;
}
while (!ngx_queue_empty(&loop->watcher_queue)) {
q = ngx_queue_head(&loop->watcher_queue);
ngx_queue_remove(q);
ngx_queue_init(q);
w = ngx_queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0))
abort();
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
}
/* Work around a kernel bug where nfds is not updated. */
events[0].portev_source = 0;
nfds = 1;
saved_errno = 0;
if (port_getn(loop->backend_fd,
events,
ARRAY_SIZE(events),
&nfds,
timeout == -1 ? NULL : &spec)) {
/* Work around another kernel bug: port_getn() may return events even
* on error.
*/
if (errno == EINTR || errno == ETIME)
saved_errno = errno;
else
abort();
}
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (events[0].portev_source == 0) {
if (timeout == 0)
return;
if (timeout == -1)
continue;
goto update_timeout;
}
if (nfds == 0) {
assert(timeout != -1);
return;
}
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->portev_object;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
/* File descriptor that we've stopped watching, ignore. */
if (w == NULL)
continue;
w->cb(loop, w, pe->portev_events);
nevents++;
/* Event ports operate in oneshot mode; rearm the watcher on the next run. */
if (w->pevents != 0 && ngx_queue_empty(&w->watcher_queue))
ngx_queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (saved_errno == ETIME) {
assert(timeout != -1);
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
timeout -= diff;
}
}
uint64_t uv__hrtime(void) {
return gethrtime();
}
/*
* We could use a static buffer for the path manipulations that we need outside
* of the function, but this function could be called by multiple consumers and
* we don't want to potentially create a race condition in the use of snprintf.
*/
int uv_exepath(char* buffer, size_t* size) {
ssize_t res;
char buf[128];
if (buffer == NULL)
return (-1);
if (size == NULL)
return (-1);
(void) snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());
res = readlink(buf, buffer, *size - 1);
if (res < 0)
return (res);
buffer[res] = '\0';
*size = res;
return (0);
}
uint64_t uv_get_free_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
uint64_t uv_get_total_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
void uv_loadavg(double avg[3]) {
(void) getloadavg(avg, 3);
}
#if defined(PORT_SOURCE_FILE)
static int uv__fs_event_rearm(uv_fs_event_t* handle) {
if (handle->fd == -1)
return 0;
if (port_associate(handle->loop->fs_fd,
PORT_SOURCE_FILE,
(uintptr_t) &handle->fo,
FILE_ATTRIB | FILE_MODIFIED,
handle) == -1) {
uv__set_sys_error(handle->loop, errno);
return -1;
}
handle->fd = PORT_LOADED;
return 0;
}
static void uv__fs_event_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents) {
uv_fs_event_t *handle = NULL;
timespec_t timeout;
port_event_t pe;
int events;
int r;
(void) w;
(void) revents;
do {
uint_t n = 1;
/*
* Note that our use of port_getn() here (and not port_get()) is deliberate:
* there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
* causes port_get() to return success instead of ETIME when there aren't
* actually any events (!); by using port_getn() in lieu of port_get(),
* we can at least work around the bug by checking for zero returned events
* and treating it as we would ETIME.
*/
do {
memset(&timeout, 0, sizeof timeout);
r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
}
while (r == -1 && errno == EINTR);
if ((r == -1 && errno == ETIME) || n == 0)
break;
handle = (uv_fs_event_t *)pe.portev_user;
assert((r == 0) && "unexpected port_get() error");
events = 0;
if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
events |= UV_CHANGE;
if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
events |= UV_RENAME;
assert(events != 0);
handle->fd = PORT_FIRED;
handle->cb(handle, NULL, events, 0);
if (handle->fd != PORT_DELETED)
if (uv__fs_event_rearm(handle) != 0)
handle->cb(handle, NULL, 0, -1);
}
while (handle->fd != PORT_DELETED);
}
int uv_fs_event_init(uv_loop_t* loop,
uv_fs_event_t* handle,
const char* filename,
uv_fs_event_cb cb,
int flags) {
int portfd;
int first_run = 0;
if (loop->fs_fd == -1) {
if ((portfd = port_create()) == -1) {
uv__set_sys_error(loop, errno);
return -1;
}
loop->fs_fd = portfd;
first_run = 1;
}
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
handle->filename = strdup(filename);
handle->fd = PORT_UNUSED;
handle->cb = cb;
memset(&handle->fo, 0, sizeof handle->fo);
handle->fo.fo_name = handle->filename;
if (uv__fs_event_rearm(handle) != 0)
return -1;
uv__handle_start(handle); /* FIXME shouldn't start automatically */
if (first_run) {
uv__io_init(&loop->fs_event_watcher, uv__fs_event_read, portfd);
uv__io_start(loop, &loop->fs_event_watcher, UV__POLLIN);
}
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
port_dissociate(handle->loop->fs_fd, PORT_SOURCE_FILE, (uintptr_t)&handle->fo);
}
handle->fd = PORT_DELETED;
free(handle->filename);
handle->filename = NULL;
handle->fo.fo_name = NULL;
uv__handle_stop(handle);
}
#else /* !defined(PORT_SOURCE_FILE) */
int uv_fs_event_init(uv_loop_t* loop,
uv_fs_event_t* handle,
const char* filename,
uv_fs_event_cb cb,
int flags) {
uv__set_sys_error(loop, ENOSYS);
return -1;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
UNREACHABLE();
}
#endif /* defined(PORT_SOURCE_FILE) */
char** uv_setup_args(int argc, char** argv) {
return argv;
}
uv_err_t uv_set_process_title(const char* title) {
return uv_ok_;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
if (size > 0) {
buffer[0] = '\0';
}
return uv_ok_;
}
uv_err_t uv_resident_set_memory(size_t* rss) {
psinfo_t psinfo;
uv_err_t err;
int fd;
fd = open("/proc/self/psinfo", O_RDONLY);
if (fd == -1)
return uv__new_sys_error(errno);
err = uv_ok_;
if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo))
*rss = (size_t)psinfo.pr_rssize * 1024;
else
err = uv__new_sys_error(EINVAL);
close(fd);
return err;
}
uv_err_t uv_uptime(double* uptime) {
kstat_ctl_t *kc;
kstat_t *ksp;
kstat_named_t *knp;
long hz = sysconf(_SC_CLK_TCK);
if ((kc = kstat_open()) == NULL)
return uv__new_sys_error(errno);
ksp = kstat_lookup(kc, (char *)"unix", 0, (char *)"system_misc");
if (kstat_read(kc, ksp, NULL) == -1) {
*uptime = -1;
} else {
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"clk_intr");
*uptime = knp->value.ul / hz;
}
kstat_close(kc);
return uv_ok_;
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
int lookup_instance;
kstat_ctl_t *kc;
kstat_t *ksp;
kstat_named_t *knp;
uv_cpu_info_t* cpu_info;
if ((kc = kstat_open()) == NULL) {
return uv__new_sys_error(errno);
}
/* Get count of cpus */
lookup_instance = 0;
while ((ksp = kstat_lookup(kc, (char *)"cpu_info", lookup_instance, NULL))) {
lookup_instance++;
}
*cpu_infos = (uv_cpu_info_t*)
malloc(lookup_instance * sizeof(uv_cpu_info_t));
if (!(*cpu_infos)) {
return uv__new_artificial_error(UV_ENOMEM);
}
*count = lookup_instance;
cpu_info = *cpu_infos;
lookup_instance = 0;
while ((ksp = kstat_lookup(kc, (char *)"cpu_info", lookup_instance, NULL))) {
if (kstat_read(kc, ksp, NULL) == -1) {
cpu_info->speed = 0;
cpu_info->model = NULL;
} else {
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"clock_MHz");
assert(knp->data_type == KSTAT_DATA_INT32 ||
knp->data_type == KSTAT_DATA_INT64);
cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
: knp->value.i64;
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"brand");
assert(knp->data_type == KSTAT_DATA_STRING);
cpu_info->model = strdup(KSTAT_NAMED_STR_PTR(knp));
}
lookup_instance++;
cpu_info++;
}
cpu_info = *cpu_infos;
lookup_instance = 0;
while ((ksp = kstat_lookup(kc, (char *)"cpu", lookup_instance, (char *)"sys"))){
if (kstat_read(kc, ksp, NULL) == -1) {
cpu_info->cpu_times.user = 0;
cpu_info->cpu_times.nice = 0;
cpu_info->cpu_times.sys = 0;
cpu_info->cpu_times.idle = 0;
cpu_info->cpu_times.irq = 0;
} else {
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"cpu_ticks_user");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.user = knp->value.ui64;
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"cpu_ticks_kernel");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.sys = knp->value.ui64;
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"cpu_ticks_idle");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.idle = knp->value.ui64;
knp = (kstat_named_t *) kstat_data_lookup(ksp, (char *)"intr");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.irq = knp->value.ui64;
cpu_info->cpu_times.nice = 0;
}
lookup_instance++;
cpu_info++;
}
kstat_close(kc);
return uv_ok_;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
#ifdef SUNOS_NO_IFADDRS
return uv__new_artificial_error(UV_ENOSYS);
#else
struct ifaddrs *addrs, *ent;
char ip[INET6_ADDRSTRLEN];
uv_interface_address_t* address;
if (getifaddrs(&addrs) != 0) {
return uv__new_sys_error(errno);
}
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == PF_PACKET)) {
continue;
}
(*count)++;
}
*addresses = (uv_interface_address_t*)
malloc(*count * sizeof(uv_interface_address_t));
if (!(*addresses)) {
return uv__new_artificial_error(UV_ENOMEM);
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
memset(&ip, 0, sizeof(ip));
if (!(ent->ifa_flags & IFF_UP && ent->ifa_flags & IFF_RUNNING)) {
continue;
}
if (ent->ifa_addr == NULL) {
continue;
}
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6 *)ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in *)ent->ifa_addr);
}
address->is_internal = ent->ifa_flags & IFF_PRIVATE || ent->ifa_flags &
IFF_LOOPBACK ? 1 : 0;
address++;
}
freeifaddrs(addrs);
return uv_ok_;
#endif /* SUNOS_NO_IFADDRS */
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}
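
A brief, hypothetical sketch of the CPU-info pair of calls implemented above for SunOS (not part of the deleted file; the uv_err_t return convention matches this file, and the field usage is illustrative):

#include "uv.h"
#include <stdio.h>

static void cpu_info_sketch(void) {
  uv_cpu_info_t* infos;
  int count;
  int i;

  if (uv_cpu_info(&infos, &count).code != UV_OK)
    return;

  for (i = 0; i < count; i++)
    printf("%s @ %d MHz\n", infos[i].model, infos[i].speed);

  /* Frees each model string and then the array, as shown above. */
  uv_free_cpu_info(infos, count);
}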

View File

@ -1,357 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP);
return 0;
}
static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) {
int sockfd;
if (uv__stream_fd(handle) != -1)
return 0;
sockfd = uv__socket(domain, SOCK_STREAM, 0);
if (sockfd == -1)
return uv__set_sys_error(handle->loop, errno);
if (uv__stream_open((uv_stream_t*)handle, sockfd, flags)) {
close(sockfd);
return -1;
}
return 0;
}
static int uv__bind(uv_tcp_t* tcp,
int domain,
struct sockaddr* addr,
int addrsize) {
int on;
if (maybe_new_socket(tcp, domain, UV_STREAM_READABLE|UV_STREAM_WRITABLE))
return -1;
on = 1;
if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
return uv__set_sys_error(tcp->loop, errno);
errno = 0;
if (bind(tcp->io_watcher.fd, addr, addrsize) && errno != EADDRINUSE)
return uv__set_sys_error(tcp->loop, errno);
tcp->delayed_error = errno;
return 0;
}
static int uv__connect(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr* addr,
socklen_t addrlen,
uv_connect_cb cb) {
int r;
assert(handle->type == UV_TCP);
if (handle->connect_req)
return uv__set_sys_error(handle->loop, EALREADY);
if (maybe_new_socket(handle,
addr->sa_family,
UV_STREAM_READABLE|UV_STREAM_WRITABLE)) {
return -1;
}
handle->delayed_error = 0;
do
r = connect(uv__stream_fd(handle), addr, addrlen);
while (r == -1 && errno == EINTR);
if (r == -1) {
if (errno == EINPROGRESS)
; /* not an error */
else if (errno == ECONNREFUSED)
/* If we get ECONNREFUSED, wait until the next tick to report the
* error. Solaris wants to report immediately--other unixes want to
* wait.
*/
handle->delayed_error = errno;
else
return uv__set_sys_error(handle->loop, errno);
}
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
ngx_queue_init(&req->queue);
handle->connect_req = req;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
if (handle->delayed_error)
uv__io_feed(handle->loop, &handle->io_watcher);
return 0;
}
int uv__tcp_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
return uv__bind(handle,
AF_INET,
(struct sockaddr*)&addr,
sizeof(struct sockaddr_in));
}
int uv__tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
return uv__bind(handle,
AF_INET6,
(struct sockaddr*)&addr,
sizeof(struct sockaddr_in6));
}
int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
return uv__stream_open((uv_stream_t*)handle,
sock,
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
}
int uv_tcp_getsockname(uv_tcp_t* handle, struct sockaddr* name,
int* namelen) {
socklen_t socklen;
int saved_errno;
int rv = 0;
/* Don't clobber errno. */
saved_errno = errno;
if (handle->delayed_error) {
uv__set_sys_error(handle->loop, handle->delayed_error);
rv = -1;
goto out;
}
if (uv__stream_fd(handle) < 0) {
uv__set_sys_error(handle->loop, EINVAL);
rv = -1;
goto out;
}
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t)*namelen;
if (getsockname(uv__stream_fd(handle), name, &socklen) == -1) {
uv__set_sys_error(handle->loop, errno);
rv = -1;
} else {
*namelen = (int)socklen;
}
out:
errno = saved_errno;
return rv;
}
int uv_tcp_getpeername(uv_tcp_t* handle, struct sockaddr* name,
int* namelen) {
socklen_t socklen;
int saved_errno;
int rv = 0;
/* Don't clobber errno. */
saved_errno = errno;
if (handle->delayed_error) {
uv__set_sys_error(handle->loop, handle->delayed_error);
rv = -1;
goto out;
}
if (uv__stream_fd(handle) < 0) {
uv__set_sys_error(handle->loop, EINVAL);
rv = -1;
goto out;
}
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t)*namelen;
if (getpeername(uv__stream_fd(handle), name, &socklen) == -1) {
uv__set_sys_error(handle->loop, errno);
rv = -1;
} else {
*namelen = (int)socklen;
}
out:
errno = saved_errno;
return rv;
}
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept = -1;
if (tcp->delayed_error)
return uv__set_sys_error(tcp->loop, tcp->delayed_error);
if (single_accept == -1) {
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
}
if (single_accept)
tcp->flags |= UV_TCP_SINGLE_ACCEPT;
if (maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE))
return -1;
if (listen(tcp->io_watcher.fd, backlog))
return uv__set_sys_error(tcp->loop, errno);
tcp->connection_cb = cb;
/* Start listening for connections. */
tcp->io_watcher.cb = uv__server_io;
uv__io_start(tcp->loop, &tcp->io_watcher, UV__POLLIN);
return 0;
}
int uv__tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in addr,
uv_connect_cb cb) {
int saved_errno;
int status;
saved_errno = errno;
status = uv__connect(req, handle, (struct sockaddr*)&addr, sizeof addr, cb);
errno = saved_errno;
return status;
}
int uv__tcp_connect6(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in6 addr,
uv_connect_cb cb) {
int saved_errno;
int status;
saved_errno = errno;
status = uv__connect(req, handle, (struct sockaddr*)&addr, sizeof addr, cb);
errno = saved_errno;
return status;
}
int uv__tcp_nodelay(int fd, int on) {
return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
}
int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
return -1;
#ifdef TCP_KEEPIDLE
if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
return -1;
#endif
/* Solaris/SmartOS, if you don't support keep-alive,
* then don't advertise it in your system headers...
*/
#if defined(TCP_KEEPALIVE) && !defined(__sun)
if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
return -1;
#endif
return 0;
}
int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
if (uv__stream_fd(handle) != -1)
if (uv__tcp_nodelay(uv__stream_fd(handle), on))
return -1;
if (on)
handle->flags |= UV_TCP_NODELAY;
else
handle->flags &= ~UV_TCP_NODELAY;
return 0;
}
int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
if (uv__stream_fd(handle) != -1)
if (uv__tcp_keepalive(uv__stream_fd(handle), on, delay))
return -1;
if (on)
handle->flags |= UV_TCP_KEEPALIVE;
else
handle->flags &= ~UV_TCP_KEEPALIVE;
/* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
* uv_tcp_t with an int that's almost never used...
*/
return 0;
}
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
if (enable)
handle->flags &= ~UV_TCP_SINGLE_ACCEPT;
else
handle->flags |= UV_TCP_SINGLE_ACCEPT;
return 0;
}
void uv__tcp_close(uv_tcp_t* handle) {
uv__stream_close((uv_stream_t*)handle);
}
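
For orientation, a small hypothetical TCP server setup against the sockaddr-by-value bind signature this file implements (not part of the deleted file; address, port and callback are illustrative, and uv_ip4_addr() is assumed to be the pre-1.0 variant that returns a struct sockaddr_in by value):

#include "uv.h"

static void on_connection(uv_stream_t* server, int status) {
  /* status == -1 means the accept failed; otherwise uv_accept() comes next. */
}

static void tcp_server_sketch(uv_loop_t* loop) {
  static uv_tcp_t server;
  struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", 7000);

  uv_tcp_init(loop, &server);
  uv_tcp_bind(&server, addr);
  uv_listen((uv_stream_t*) &server, 128, on_connection);
}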

View File

@ -1,431 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <pthread.h>
#include <assert.h>
#include <errno.h>
#if defined(__APPLE__) && defined(__MACH__)
#include <sys/time.h>
#endif /* defined(__APPLE__) && defined(__MACH__) */
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
int uv_thread_join(uv_thread_t *tid) {
if (pthread_join(*tid, NULL))
return -1;
else
return 0;
}
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
if (pthread_mutex_init(mutex, NULL))
return -1;
else
return 0;
#else
pthread_mutexattr_t attr;
int r;
if (pthread_mutexattr_init(&attr))
abort();
if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
abort();
r = pthread_mutex_init(mutex, &attr);
if (pthread_mutexattr_destroy(&attr))
abort();
return r ? -1 : 0;
#endif
}
void uv_mutex_destroy(uv_mutex_t* mutex) {
if (pthread_mutex_destroy(mutex))
abort();
}
void uv_mutex_lock(uv_mutex_t* mutex) {
if (pthread_mutex_lock(mutex))
abort();
}
int uv_mutex_trylock(uv_mutex_t* mutex) {
int r;
r = pthread_mutex_trylock(mutex);
if (r && r != EBUSY && r != EAGAIN)
abort();
if (r)
return -1;
else
return 0;
}
void uv_mutex_unlock(uv_mutex_t* mutex) {
if (pthread_mutex_unlock(mutex))
abort();
}
int uv_rwlock_init(uv_rwlock_t* rwlock) {
if (pthread_rwlock_init(rwlock, NULL))
return -1;
else
return 0;
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
if (pthread_rwlock_destroy(rwlock))
abort();
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_rdlock(rwlock))
abort();
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
int r;
r = pthread_rwlock_tryrdlock(rwlock);
if (r && r != EBUSY && r != EAGAIN)
abort();
if (r)
return -1;
else
return 0;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_unlock(rwlock))
abort();
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_wrlock(rwlock))
abort();
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
int r;
r = pthread_rwlock_trywrlock(rwlock);
if (r && r != EBUSY && r != EAGAIN)
abort();
if (r)
return -1;
else
return 0;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_unlock(rwlock))
abort();
}
void uv_once(uv_once_t* guard, void (*callback)(void)) {
if (pthread_once(guard, callback))
abort();
}
#if defined(__APPLE__) && defined(__MACH__)
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
if (semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value))
return -1;
else
return 0;
}
void uv_sem_destroy(uv_sem_t* sem) {
if (semaphore_destroy(mach_task_self(), *sem))
abort();
}
void uv_sem_post(uv_sem_t* sem) {
if (semaphore_signal(*sem))
abort();
}
void uv_sem_wait(uv_sem_t* sem) {
int r;
do
r = semaphore_wait(*sem);
while (r == KERN_ABORTED);
if (r != KERN_SUCCESS)
abort();
}
int uv_sem_trywait(uv_sem_t* sem) {
mach_timespec_t interval;
interval.tv_sec = 0;
interval.tv_nsec = 0;
if (semaphore_timedwait(*sem, interval) == KERN_SUCCESS)
return 0;
else
return -1;
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
return sem_init(sem, 0, value);
}
void uv_sem_destroy(uv_sem_t* sem) {
if (sem_destroy(sem))
abort();
}
void uv_sem_post(uv_sem_t* sem) {
if (sem_post(sem))
abort();
}
void uv_sem_wait(uv_sem_t* sem) {
int r;
do
r = sem_wait(sem);
while (r == -1 && errno == EINTR);
if (r)
abort();
}
int uv_sem_trywait(uv_sem_t* sem) {
int r;
do
r = sem_trywait(sem);
while (r == -1 && errno == EINTR);
if (r && errno != EAGAIN)
abort();
return r;
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
#if defined(__APPLE__) && defined(__MACH__)
int uv_cond_init(uv_cond_t* cond) {
if (pthread_cond_init(cond, NULL))
return -1;
else
return 0;
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
int uv_cond_init(uv_cond_t* cond) {
pthread_condattr_t attr;
if (pthread_condattr_init(&attr))
return -1;
if (pthread_condattr_setclock(&attr, CLOCK_MONOTONIC))
goto error2;
if (pthread_cond_init(cond, &attr))
goto error2;
if (pthread_condattr_destroy(&attr))
goto error;
return 0;
error:
pthread_cond_destroy(cond);
error2:
pthread_condattr_destroy(&attr);
return -1;
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
void uv_cond_destroy(uv_cond_t* cond) {
if (pthread_cond_destroy(cond))
abort();
}
void uv_cond_signal(uv_cond_t* cond) {
if (pthread_cond_signal(cond))
abort();
}
void uv_cond_broadcast(uv_cond_t* cond) {
if (pthread_cond_broadcast(cond))
abort();
}
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
if (pthread_cond_wait(cond, mutex))
abort();
}
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
int r;
struct timespec ts;
#if defined(__APPLE__) && defined(__MACH__)
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
timeout += uv__hrtime();
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
r = pthread_cond_timedwait(cond, mutex, &ts);
#endif
if (r == 0)
return 0;
if (r == ETIMEDOUT)
return -1;
abort();
return -1; /* Satisfy the compiler. */
}
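/* Usage sketch (illustrative only, not part of the original file): a bounded
 * wait on a condition variable. The timeout passed to uv_cond_timedwait() is
 * relative and expressed in nanoseconds, and the call returns -1 on timeout.
 * The latch type and field names are assumptions made up for this example.
 */
struct example_latch {
  uv_mutex_t mutex;
  uv_cond_t cond;
  int fired;
};

static int example_latch_wait(struct example_latch* latch,
                              uint64_t timeout_ms) {
  int rc = 0;
  uv_mutex_lock(&latch->mutex);
  while (latch->fired == 0)
    if (uv_cond_timedwait(&latch->cond, &latch->mutex,
                          timeout_ms * 1000 * 1000)) {
      rc = -1;  /* Timed out before another thread set `fired`. */
      break;
    }
  uv_mutex_unlock(&latch->mutex);
  return rc;
}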
#if defined(__APPLE__) && defined(__MACH__)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
barrier->n = count;
barrier->count = 0;
if (uv_mutex_init(&barrier->mutex))
return -1;
if (uv_sem_init(&barrier->turnstile1, 0))
goto error2;
if (uv_sem_init(&barrier->turnstile2, 1))
goto error;
return 0;
error:
uv_sem_destroy(&barrier->turnstile1);
error2:
uv_mutex_destroy(&barrier->mutex);
return -1;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
uv_sem_destroy(&barrier->turnstile2);
uv_sem_destroy(&barrier->turnstile1);
uv_mutex_destroy(&barrier->mutex);
}
void uv_barrier_wait(uv_barrier_t* barrier) {
uv_mutex_lock(&barrier->mutex);
if (++barrier->count == barrier->n) {
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile1);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile1);
uv_mutex_lock(&barrier->mutex);
if (--barrier->count == 0) {
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile2);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile2);
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
if (pthread_barrier_init(barrier, NULL, count))
return -1;
else
return 0;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
void uv_barrier_wait(uv_barrier_t* barrier) {
int r = pthread_barrier_wait(barrier);
if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
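/* Usage sketch (illustrative only): both implementations above behave like a
 * pthread barrier, so callers write the same code on every platform: each
 * thread blocks in uv_barrier_wait() until `count` threads have arrived. The
 * worker body and thread count are assumptions made up for this example.
 */
static uv_barrier_t example_barrier;

static void example_barrier_worker(void* arg) {
  /* ... per-thread setup ... */
  uv_barrier_wait(&example_barrier);
  /* All four threads continue from this point together. */
}

static void example_barrier_usage(void) {
  uv_thread_t tids[4];
  unsigned int i;
  if (uv_barrier_init(&example_barrier, 4))
    abort();
  for (i = 0; i < 4; i++)
    if (uv_thread_create(tids + i, example_barrier_worker, NULL))
      abort();
  for (i = 0; i < 4; i++)
    if (uv_thread_join(tids + i))
      abort();
  uv_barrier_destroy(&example_barrier);
}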

View File

@ -1,286 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "internal.h"
#include <stdlib.h>
#define MAX_THREADPOOL_SIZE 128
static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static ngx_queue_t exit_message;
static ngx_queue_t wq;
static volatile int initialized;
static void uv__cancelled(struct uv__work* w) {
abort();
}
/* To avoid deadlock with uv_cancel() it's crucial that the worker
* never holds the global mutex and the loop-local mutex at the same time.
*/
static void worker(void* arg) {
struct uv__work* w;
ngx_queue_t* q;
(void) arg;
for (;;) {
uv_mutex_lock(&mutex);
while (ngx_queue_empty(&wq))
uv_cond_wait(&cond, &mutex);
q = ngx_queue_head(&wq);
if (q == &exit_message)
uv_cond_signal(&cond);
else {
ngx_queue_remove(q);
ngx_queue_init(q); /* Signal uv_cancel() that the work req is
executing. */
}
uv_mutex_unlock(&mutex);
if (q == &exit_message)
break;
w = ngx_queue_data(q, struct uv__work, wq);
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
ngx_queue_insert_tail(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
}
}
static void post(ngx_queue_t* q) {
uv_mutex_lock(&mutex);
ngx_queue_insert_tail(&wq, q);
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
}
static void init_once(void) {
unsigned int i;
const char* val;
nthreads = ARRAY_SIZE(default_threads);
val = getenv("UV_THREADPOOL_SIZE");
if (val != NULL)
nthreads = atoi(val);
if (nthreads == 0)
nthreads = 1;
if (nthreads > MAX_THREADPOOL_SIZE)
nthreads = MAX_THREADPOOL_SIZE;
threads = default_threads;
if (nthreads > ARRAY_SIZE(default_threads)) {
threads = malloc(nthreads * sizeof(threads[0]));
if (threads == NULL) {
nthreads = ARRAY_SIZE(default_threads);
threads = default_threads;
}
}
if (uv_cond_init(&cond))
abort();
if (uv_mutex_init(&mutex))
abort();
ngx_queue_init(&wq);
for (i = 0; i < nthreads; i++)
if (uv_thread_create(threads + i, worker, NULL))
abort();
initialized = 1;
}
#if defined(__GNUC__)
__attribute__((destructor))
static void cleanup(void) {
unsigned int i;
if (initialized == 0)
return;
post(&exit_message);
for (i = 0; i < nthreads; i++)
if (uv_thread_join(threads + i))
abort();
if (threads != default_threads)
free(threads);
uv_mutex_destroy(&mutex);
uv_cond_destroy(&cond);
threads = NULL;
nthreads = 0;
initialized = 0;
}
#endif
void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
void (*work)(struct uv__work* w),
void (*done)(struct uv__work* w, int status)) {
uv_once(&once, init_once);
w->loop = loop;
w->work = work;
w->done = done;
post(&w->wq);
}
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
int cancelled;
uv_mutex_lock(&mutex);
uv_mutex_lock(&w->loop->wq_mutex);
cancelled = !ngx_queue_empty(&w->wq) && w->work != NULL;
if (cancelled)
ngx_queue_remove(&w->wq);
uv_mutex_unlock(&w->loop->wq_mutex);
uv_mutex_unlock(&mutex);
if (!cancelled)
return -1;
w->work = uv__cancelled;
uv_mutex_lock(&loop->wq_mutex);
ngx_queue_insert_tail(&loop->wq, &w->wq);
uv_async_send(&loop->wq_async);
uv_mutex_unlock(&loop->wq_mutex);
return 0;
}
void uv__work_done(uv_async_t* handle, int status) {
struct uv__work* w;
uv_loop_t* loop;
ngx_queue_t* q;
ngx_queue_t wq;
int err;
loop = container_of(handle, uv_loop_t, wq_async);
ngx_queue_init(&wq);
uv_mutex_lock(&loop->wq_mutex);
if (!ngx_queue_empty(&loop->wq)) {
q = ngx_queue_head(&loop->wq);
ngx_queue_split(&loop->wq, q, &wq);
}
uv_mutex_unlock(&loop->wq_mutex);
while (!ngx_queue_empty(&wq)) {
q = ngx_queue_head(&wq);
ngx_queue_remove(q);
w = container_of(q, struct uv__work, wq);
err = (w->work == uv__cancelled) ? -UV_ECANCELED : 0;
w->done(w, err);
}
}
static void uv__queue_work(struct uv__work* w) {
uv_work_t* req = container_of(w, uv_work_t, work_req);
req->work_cb(req);
}
static void uv__queue_done(struct uv__work* w, int status) {
uv_work_t* req;
req = container_of(w, uv_work_t, work_req);
uv__req_unregister(req->loop, req);
if (req->after_work_cb == NULL)
return;
if (status == -UV_ECANCELED)
uv__set_artificial_error(req->loop, UV_ECANCELED);
req->after_work_cb(req, status ? -1 : 0);
}
int uv_queue_work(uv_loop_t* loop,
uv_work_t* req,
uv_work_cb work_cb,
uv_after_work_cb after_work_cb) {
if (work_cb == NULL)
return uv__set_artificial_error(loop, UV_EINVAL);
uv__req_init(loop, req, UV_WORK);
req->loop = loop;
req->work_cb = work_cb;
req->after_work_cb = after_work_cb;
uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
return 0;
}
int uv_cancel(uv_req_t* req) {
struct uv__work* wreq;
uv_loop_t* loop;
switch (req->type) {
case UV_FS:
loop = ((uv_fs_t*) req)->loop;
wreq = &((uv_fs_t*) req)->work_req;
break;
case UV_GETADDRINFO:
loop = ((uv_getaddrinfo_t*) req)->loop;
wreq = &((uv_getaddrinfo_t*) req)->work_req;
break;
case UV_WORK:
loop = ((uv_work_t*) req)->loop;
wreq = &((uv_work_t*) req)->work_req;
break;
default:
return -1;
}
return uv__work_cancel(loop, req, wreq);
}
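/* Usage sketch (illustrative only): queueing work from the loop thread. The
 * work callback runs on a pool thread and must not touch the loop; the
 * after-work callback runs back on the loop thread with status -1 when the
 * request was cancelled via uv_cancel(). The names and result variable are
 * assumptions made up for this example; the pool size itself is controlled by
 * the UV_THREADPOOL_SIZE environment variable, capped at MAX_THREADPOOL_SIZE
 * in init_once() above.
 */
static int example_result;

static void example_work_cb(uv_work_t* req) {
  example_result = 42;  /* Runs on a pool thread. */
}

static void example_after_work_cb(uv_work_t* req, int status) {
  if (status == -1)
    return;  /* Cancelled before a worker picked it up. */
  /* example_result is now safe to read on the loop thread. */
}

static int example_queue_work(uv_loop_t* loop, uv_work_t* req) {
  return uv_queue_work(loop, req, example_work_cb, example_after_work_cb);
}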

View File

@ -1,153 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <limits.h>
static int uv__timer_cmp(const uv_timer_t* a, const uv_timer_t* b) {
if (a->timeout < b->timeout)
return -1;
if (a->timeout > b->timeout)
return 1;
/*
* compare start_id when both have the same timeout. start_id is
* allocated with loop->timer_counter in uv_timer_start().
*/
if (a->start_id < b->start_id)
return -1;
if (a->start_id > b->start_id)
return 1;
return 0;
}
RB_GENERATE_STATIC(uv__timers, uv_timer_s, tree_entry, uv__timer_cmp)
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
handle->timer_cb = NULL;
handle->repeat = 0;
return 0;
}
int uv_timer_start(uv_timer_t* handle,
uv_timer_cb cb,
uint64_t timeout,
uint64_t repeat) {
uint64_t clamped_timeout;
if (uv__is_active(handle))
uv_timer_stop(handle);
clamped_timeout = handle->loop->time + timeout;
if (clamped_timeout < timeout)
clamped_timeout = (uint64_t) -1;
handle->timer_cb = cb;
handle->timeout = clamped_timeout;
handle->repeat = repeat;
/* start_id is the second index to be compared in uv__timer_cmp() */
handle->start_id = handle->loop->timer_counter++;
RB_INSERT(uv__timers, &handle->loop->timer_handles, handle);
uv__handle_start(handle);
return 0;
}
int uv_timer_stop(uv_timer_t* handle) {
if (!uv__is_active(handle))
return 0;
RB_REMOVE(uv__timers, &handle->loop->timer_handles, handle);
uv__handle_stop(handle);
return 0;
}
int uv_timer_again(uv_timer_t* handle) {
if (handle->timer_cb == NULL)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
if (handle->repeat) {
uv_timer_stop(handle);
uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
}
return 0;
}
void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
handle->repeat = repeat;
}
uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
return handle->repeat;
}
int uv__next_timeout(const uv_loop_t* loop) {
const uv_timer_t* handle;
uint64_t diff;
/* RB_MIN expects a non-const tree root. That's okay, it doesn't modify it. */
handle = RB_MIN(uv__timers, (struct uv__timers*) &loop->timer_handles);
if (handle == NULL)
return -1; /* block indefinitely */
if (handle->timeout <= loop->time)
return 0;
diff = handle->timeout - loop->time;
if (diff > INT_MAX)
diff = INT_MAX;
return diff;
}
void uv__run_timers(uv_loop_t* loop) {
uv_timer_t* handle;
while ((handle = RB_MIN(uv__timers, &loop->timer_handles))) {
if (handle->timeout > loop->time)
break;
uv_timer_stop(handle);
uv_timer_again(handle);
handle->timer_cb(handle, 0);
}
}
void uv__timer_close(uv_timer_t* handle) {
uv_timer_stop(handle);
}
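/* Usage sketch (illustrative only): a repeating timer. Timeouts are given in
 * milliseconds relative to the loop's cached time; with a non-zero repeat the
 * handle is rearmed by the uv_timer_again() call in uv__run_timers() above.
 * In this release the timer callback also receives an int status argument.
 * The names and intervals are assumptions made up for this example.
 */
static void example_timer_cb(uv_timer_t* handle, int status) {
  /* Fires once after 500 ms, then every 1000 ms until uv_timer_stop(). */
}

static int example_start_timer(uv_loop_t* loop, uv_timer_t* handle) {
  if (uv_timer_init(loop, handle))
    return -1;
  return uv_timer_start(handle, example_timer_cb, 500, 1000);
}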

View File

@ -1,179 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <unistd.h>
#include <termios.h>
#include <errno.h>
#include <sys/ioctl.h>
static int orig_termios_fd = -1;
static struct termios orig_termios;
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
uv__stream_init(loop, (uv_stream_t*)tty, UV_TTY);
#if defined(__APPLE__)
if (uv__stream_try_select((uv_stream_t*) tty, &fd))
return -1;
#endif /* defined(__APPLE__) */
if (readable) {
uv__nonblock(fd, 1);
uv__stream_open((uv_stream_t*)tty, fd, UV_STREAM_READABLE);
} else {
/* Note: a writable tty is set to blocking mode. */
uv__stream_open((uv_stream_t*)tty, fd, UV_STREAM_WRITABLE);
tty->flags |= UV_STREAM_BLOCKING;
}
tty->mode = 0;
return 0;
}
int uv_tty_set_mode(uv_tty_t* tty, int mode) {
struct termios raw;
int fd;
fd = uv__stream_fd(tty);
if (mode && tty->mode == 0) {
/* on */
if (tcgetattr(fd, &tty->orig_termios)) {
goto fatal;
}
/* This is used for uv_tty_reset_mode() */
if (orig_termios_fd == -1) {
orig_termios = tty->orig_termios;
orig_termios_fd = fd;
}
raw = tty->orig_termios;
raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
raw.c_oflag |= (ONLCR);
raw.c_cflag |= (CS8);
raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
raw.c_cc[VMIN] = 1;
raw.c_cc[VTIME] = 0;
/* Put terminal in raw mode after draining */
if (tcsetattr(fd, TCSADRAIN, &raw)) {
goto fatal;
}
tty->mode = 1;
return 0;
} else if (mode == 0 && tty->mode) {
/* off */
/* Put terminal in original mode after flushing */
if (tcsetattr(fd, TCSAFLUSH, &tty->orig_termios)) {
goto fatal;
}
tty->mode = 0;
return 0;
}
fatal:
uv__set_sys_error(tty->loop, errno);
return -1;
}
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
struct winsize ws;
if (ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws) < 0) {
uv__set_sys_error(tty->loop, errno);
return -1;
}
*width = ws.ws_col;
*height = ws.ws_row;
return 0;
}
uv_handle_type uv_guess_handle(uv_file file) {
struct sockaddr sa;
struct stat s;
socklen_t len;
int type;
if (file < 0)
return UV_UNKNOWN_HANDLE;
if (isatty(file))
return UV_TTY;
if (fstat(file, &s))
return UV_UNKNOWN_HANDLE;
if (S_ISREG(s.st_mode))
return UV_FILE;
if (S_ISCHR(s.st_mode))
return UV_FILE; /* XXX UV_NAMED_PIPE? */
if (S_ISFIFO(s.st_mode))
return UV_NAMED_PIPE;
if (!S_ISSOCK(s.st_mode))
return UV_UNKNOWN_HANDLE;
len = sizeof(type);
if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
return UV_UNKNOWN_HANDLE;
len = sizeof(sa);
if (getsockname(file, &sa, &len))
return UV_UNKNOWN_HANDLE;
if (type == SOCK_DGRAM)
if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
return UV_UDP;
if (type == SOCK_STREAM) {
if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
return UV_TCP;
if (sa.sa_family == AF_UNIX)
return UV_NAMED_PIPE;
}
return UV_UNKNOWN_HANDLE;
}
void uv_tty_reset_mode(void) {
if (orig_termios_fd >= 0) {
tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
}
}
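/* Usage sketch (illustrative only): putting the process's stdin into raw mode
 * and restoring it afterwards. uv_tty_reset_mode() reapplies the termios that
 * the first uv_tty_set_mode(tty, 1) call saved in orig_termios above. The
 * function name is an assumption made up for this example.
 */
static int example_raw_stdin(uv_loop_t* loop, uv_tty_t* tty) {
  if (uv_tty_init(loop, tty, 0, 1))  /* fd 0, readable. */
    return -1;
  if (uv_tty_set_mode(tty, 1))       /* Enter raw mode. */
    return -1;
  /* ... read from the tty ... */
  uv_tty_reset_mode();               /* Restore the original mode. */
  return 0;
}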

View File

@ -1,709 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_run_pending(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_sendmsg(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, int domain);
static int uv__send(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr* addr,
socklen_t addrlen,
uv_udp_send_cb send_cb);
void uv__udp_close(uv_udp_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher);
uv__handle_stop(handle);
close(handle->io_watcher.fd);
handle->io_watcher.fd = -1;
}
void uv__udp_finish_close(uv_udp_t* handle) {
uv_udp_send_t* req;
ngx_queue_t* q;
assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
assert(handle->io_watcher.fd == -1);
uv__udp_run_completed(handle);
while (!ngx_queue_empty(&handle->write_queue)) {
q = ngx_queue_head(&handle->write_queue);
ngx_queue_remove(q);
req = ngx_queue_data(q, uv_udp_send_t, queue);
uv__req_unregister(handle->loop, req);
if (req->bufs != req->bufsml)
free(req->bufs);
req->bufs = NULL;
if (req->send_cb) {
uv__set_artificial_error(handle->loop, UV_ECANCELED);
req->send_cb(req, -1);
}
}
/* Now tear down the handle. */
handle->recv_cb = NULL;
handle->alloc_cb = NULL;
/* but _do not_ touch close_cb */
}
static void uv__udp_run_pending(uv_udp_t* handle) {
uv_udp_send_t* req;
ngx_queue_t* q;
struct msghdr h;
ssize_t size;
while (!ngx_queue_empty(&handle->write_queue)) {
q = ngx_queue_head(&handle->write_queue);
assert(q != NULL);
req = ngx_queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
memset(&h, 0, sizeof h);
h.msg_name = &req->addr;
h.msg_namelen = (req->addr.sin6_family == AF_INET6 ?
sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
h.msg_iov = (struct iovec*)req->bufs;
h.msg_iovlen = req->bufcnt;
do {
size = sendmsg(handle->io_watcher.fd, &h, 0);
}
while (size == -1 && errno == EINTR);
/* TODO try to write once or twice more in the
* hope that the socket becomes writable again?
*/
if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
break;
req->status = (size == -1 ? -errno : size);
#ifndef NDEBUG
/* Sanity check. */
if (size != -1) {
ssize_t nbytes;
int i;
for (nbytes = i = 0; i < req->bufcnt; i++)
nbytes += req->bufs[i].len;
assert(size == nbytes);
}
#endif
/* Sending a datagram is an atomic operation: either all data
* is written or nothing is (and EMSGSIZE is raised). That is
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
ngx_queue_remove(&req->queue);
ngx_queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
}
static void uv__udp_run_completed(uv_udp_t* handle) {
uv_udp_send_t* req;
ngx_queue_t* q;
while (!ngx_queue_empty(&handle->write_completed_queue)) {
q = ngx_queue_head(&handle->write_completed_queue);
ngx_queue_remove(q);
req = ngx_queue_data(q, uv_udp_send_t, queue);
uv__req_unregister(handle->loop, req);
if (req->bufs != req->bufsml)
free(req->bufs);
req->bufs = NULL;
if (req->send_cb == NULL)
continue;
/* req->status >= 0 == bytes written
* req->status < 0 == errno
*/
if (req->status >= 0) {
req->send_cb(req, 0);
}
else {
uv__set_sys_error(handle->loop, -req->status);
req->send_cb(req, -1);
}
}
}
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
if (revents & UV__POLLIN)
uv__udp_recvmsg(loop, w, revents);
if (revents & UV__POLLOUT)
uv__udp_sendmsg(loop, w, revents);
}
static void uv__udp_recvmsg(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents) {
struct sockaddr_storage peer;
struct msghdr h;
uv_udp_t* handle;
ssize_t nread;
uv_buf_t buf;
int flags;
int count;
handle = container_of(w, uv_udp_t, io_watcher);
assert(handle->type == UV_UDP);
assert(revents & UV__POLLIN);
assert(handle->recv_cb != NULL);
assert(handle->alloc_cb != NULL);
/* Prevent loop starvation when the data comes in as fast as (or faster than)
* we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
*/
count = 32;
memset(&h, 0, sizeof(h));
h.msg_name = &peer;
do {
buf = handle->alloc_cb((uv_handle_t*)handle, 64 * 1024);
assert(buf.len > 0);
assert(buf.base != NULL);
h.msg_namelen = sizeof(peer);
h.msg_iov = (void*) &buf;
h.msg_iovlen = 1;
do {
nread = recvmsg(handle->io_watcher.fd, &h, 0);
}
while (nread == -1 && errno == EINTR);
if (nread == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
uv__set_sys_error(handle->loop, EAGAIN);
handle->recv_cb(handle, 0, buf, NULL, 0);
}
else {
uv__set_sys_error(handle->loop, errno);
handle->recv_cb(handle, -1, buf, NULL, 0);
}
}
else {
flags = 0;
if (h.msg_flags & MSG_TRUNC)
flags |= UV_UDP_PARTIAL;
handle->recv_cb(handle,
nread,
buf,
(struct sockaddr*)&peer,
flags);
}
}
/* recv_cb callback may decide to pause or close the handle */
while (nread != -1
&& count-- > 0
&& handle->io_watcher.fd != -1
&& handle->recv_cb != NULL);
}
static void uv__udp_sendmsg(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents) {
uv_udp_t* handle;
handle = container_of(w, uv_udp_t, io_watcher);
assert(handle->type == UV_UDP);
assert(revents & UV__POLLOUT);
assert(!ngx_queue_empty(&handle->write_queue)
|| !ngx_queue_empty(&handle->write_completed_queue));
/* Write out pending data first. */
uv__udp_run_pending(handle);
/* Drain 'request completed' queue. */
uv__udp_run_completed(handle);
if (!ngx_queue_empty(&handle->write_completed_queue)) {
/* Schedule completion callbacks. */
uv__io_feed(handle->loop, &handle->io_watcher);
}
else if (ngx_queue_empty(&handle->write_queue)) {
/* Pending queue and completion queue empty, stop watcher. */
uv__io_stop(loop, &handle->io_watcher, UV__POLLOUT);
if (!uv__io_active(&handle->io_watcher, UV__POLLIN))
uv__handle_stop(handle);
}
}
/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
* refinements for programs that use multicast.
*
* Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
* are different from the BSDs: it _shares_ the port rather than steal it
* from the current listener. While useful, it's not something we can emulate
* on other platforms so we don't enable it.
*/
static int uv__set_reuse(int fd) {
int yes;
#if defined(SO_REUSEPORT) && !defined(__linux__)
yes = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return -errno;
#else
yes = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
return -errno;
#endif
return 0;
}
static int uv__bind(uv_udp_t* handle,
int domain,
struct sockaddr* addr,
socklen_t len,
unsigned flags) {
int saved_errno;
int status;
int err;
int yes;
int fd;
saved_errno = errno;
status = -1;
fd = -1;
/* Check for bad flags. */
if (flags & ~UV_UDP_IPV6ONLY) {
uv__set_sys_error(handle->loop, EINVAL);
goto out;
}
/* Cannot set IPv6-only mode on non-IPv6 socket. */
if ((flags & UV_UDP_IPV6ONLY) && domain != AF_INET6) {
uv__set_sys_error(handle->loop, EINVAL);
goto out;
}
if (handle->io_watcher.fd == -1) {
if ((fd = uv__socket(domain, SOCK_DGRAM, 0)) == -1) {
uv__set_sys_error(handle->loop, errno);
goto out;
}
handle->io_watcher.fd = fd;
}
fd = handle->io_watcher.fd;
err = uv__set_reuse(fd);
if (err) {
uv__set_sys_error(handle->loop, -err);
goto out;
}
if (flags & UV_UDP_IPV6ONLY) {
#ifdef IPV6_V6ONLY
yes = 1;
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
uv__set_sys_error(handle->loop, errno);
goto out;
}
#else
uv__set_sys_error(handle->loop, ENOTSUP);
goto out;
#endif
}
if (bind(fd, addr, len) == -1) {
uv__set_sys_error(handle->loop, errno);
goto out;
}
handle->io_watcher.fd = fd;
status = 0;
out:
if (status) {
close(handle->io_watcher.fd);
handle->io_watcher.fd = -1;
}
errno = saved_errno;
return status;
}
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, int domain) {
unsigned char taddr[sizeof(struct sockaddr_in6)];
socklen_t addrlen;
assert(domain == AF_INET || domain == AF_INET6);
if (handle->io_watcher.fd != -1)
return 0;
switch (domain) {
case AF_INET:
{
struct sockaddr_in* addr = (void*)&taddr;
memset(addr, 0, sizeof *addr);
addr->sin_family = AF_INET;
addr->sin_addr.s_addr = INADDR_ANY;
addrlen = sizeof *addr;
break;
}
case AF_INET6:
{
struct sockaddr_in6* addr = (void*)&taddr;
memset(addr, 0, sizeof *addr);
addr->sin6_family = AF_INET6;
addr->sin6_addr = in6addr_any;
addrlen = sizeof *addr;
break;
}
default:
assert(0 && "unsupported address family");
abort();
}
return uv__bind(handle, domain, (struct sockaddr*)&taddr, addrlen, 0);
}
static int uv__send(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr* addr,
socklen_t addrlen,
uv_udp_send_cb send_cb) {
assert(bufcnt > 0);
if (uv__udp_maybe_deferred_bind(handle, addr->sa_family))
return -1;
uv__req_init(handle->loop, req, UV_UDP_SEND);
assert(addrlen <= sizeof(req->addr));
memcpy(&req->addr, addr, addrlen);
req->send_cb = send_cb;
req->handle = handle;
req->bufcnt = bufcnt;
if (bufcnt <= (int) ARRAY_SIZE(req->bufsml)) {
req->bufs = req->bufsml;
}
else if ((req->bufs = malloc(bufcnt * sizeof(bufs[0]))) == NULL) {
uv__set_sys_error(handle->loop, ENOMEM);
return -1;
}
memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
ngx_queue_insert_tail(&handle->write_queue, &req->queue);
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
uv__handle_start(handle);
return 0;
}
int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
handle->alloc_cb = NULL;
handle->recv_cb = NULL;
uv__io_init(&handle->io_watcher, uv__udp_io, -1);
ngx_queue_init(&handle->write_queue);
ngx_queue_init(&handle->write_completed_queue);
return 0;
}
int uv__udp_bind(uv_udp_t* handle, struct sockaddr_in addr, unsigned flags) {
return uv__bind(handle,
AF_INET,
(struct sockaddr*)&addr,
sizeof addr,
flags);
}
int uv__udp_bind6(uv_udp_t* handle, struct sockaddr_in6 addr, unsigned flags) {
return uv__bind(handle,
AF_INET6,
(struct sockaddr*)&addr,
sizeof addr,
flags);
}
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
int saved_errno;
int status;
int err;
saved_errno = errno;
status = -1;
/* Check for already active socket. */
if (handle->io_watcher.fd != -1) {
uv__set_artificial_error(handle->loop, UV_EALREADY);
goto out;
}
err = uv__set_reuse(sock);
if (err) {
uv__set_sys_error(handle->loop, -err);
goto out;
}
handle->io_watcher.fd = sock;
status = 0;
out:
errno = saved_errno;
return status;
}
int uv_udp_set_membership(uv_udp_t* handle,
const char* multicast_addr,
const char* interface_addr,
uv_membership membership) {
struct ip_mreq mreq;
int optname;
memset(&mreq, 0, sizeof mreq);
if (interface_addr) {
mreq.imr_interface.s_addr = inet_addr(interface_addr);
} else {
mreq.imr_interface.s_addr = htonl(INADDR_ANY);
}
mreq.imr_multiaddr.s_addr = inet_addr(multicast_addr);
switch (membership) {
case UV_JOIN_GROUP:
optname = IP_ADD_MEMBERSHIP;
break;
case UV_LEAVE_GROUP:
optname = IP_DROP_MEMBERSHIP;
break;
default:
return uv__set_artificial_error(handle->loop, UV_EINVAL);
}
if (setsockopt(handle->io_watcher.fd,
IPPROTO_IP,
optname,
&mreq,
sizeof(mreq))) {
return uv__set_sys_error(handle->loop, errno);
}
return 0;
}
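/* Usage sketch (illustrative only): joining an IPv4 multicast group on the
 * default interface (interface_addr == NULL maps to INADDR_ANY above). The
 * group address and port are assumptions made up for this example.
 */
static int example_join_group(uv_loop_t* loop, uv_udp_t* handle) {
  if (uv_udp_init(loop, handle))
    return -1;
  if (uv_udp_bind(handle, uv_ip4_addr("0.0.0.0", 9123), 0))
    return -1;
  return uv_udp_set_membership(handle, "239.255.0.1", NULL, UV_JOIN_GROUP);
}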
static int uv__setsockopt_maybe_char(uv_udp_t* handle, int option, int val) {
#if defined(__sun)
char arg = val;
#else
int arg = val;
#endif
if (val < 0 || val > 255)
return uv__set_sys_error(handle->loop, EINVAL);
if (setsockopt(handle->io_watcher.fd, IPPROTO_IP, option, &arg, sizeof(arg)))
return uv__set_sys_error(handle->loop, errno);
return 0;
}
int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
if (setsockopt(handle->io_watcher.fd,
SOL_SOCKET,
SO_BROADCAST,
&on,
sizeof(on))) {
return uv__set_sys_error(handle->loop, errno);
}
return 0;
}
int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
if (ttl < 1 || ttl > 255)
return uv__set_sys_error(handle->loop, EINVAL);
if (setsockopt(handle->io_watcher.fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)))
return uv__set_sys_error(handle->loop, errno);
return 0;
}
int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
return uv__setsockopt_maybe_char(handle, IP_MULTICAST_TTL, ttl);
}
int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
return uv__setsockopt_maybe_char(handle, IP_MULTICAST_LOOP, on);
}
int uv_udp_getsockname(uv_udp_t* handle, struct sockaddr* name, int* namelen) {
socklen_t socklen;
int saved_errno;
int rv = 0;
/* Don't clobber errno. */
saved_errno = errno;
if (handle->io_watcher.fd == -1) {
uv__set_sys_error(handle->loop, EINVAL);
rv = -1;
goto out;
}
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t)*namelen;
if (getsockname(handle->io_watcher.fd, name, &socklen) == -1) {
uv__set_sys_error(handle->loop, errno);
rv = -1;
} else {
*namelen = (int)socklen;
}
out:
errno = saved_errno;
return rv;
}
int uv__udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr_in addr,
uv_udp_send_cb send_cb) {
return uv__send(req,
handle,
bufs,
bufcnt,
(struct sockaddr*)&addr,
sizeof addr,
send_cb);
}
int uv__udp_send6(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr_in6 addr,
uv_udp_send_cb send_cb) {
return uv__send(req,
handle,
bufs,
bufcnt,
(struct sockaddr*)&addr,
sizeof addr,
send_cb);
}
int uv__udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb) {
if (alloc_cb == NULL || recv_cb == NULL) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
return -1;
}
if (uv__io_active(&handle->io_watcher, UV__POLLIN)) {
uv__set_artificial_error(handle->loop, UV_EALREADY);
return -1;
}
if (uv__udp_maybe_deferred_bind(handle, AF_INET))
return -1;
handle->alloc_cb = alloc_cb;
handle->recv_cb = recv_cb;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN);
uv__handle_start(handle);
return 0;
}
int uv__udp_recv_stop(uv_udp_t* handle) {
uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLIN);
if (!uv__io_active(&handle->io_watcher, UV__POLLOUT))
uv__handle_stop(handle);
handle->alloc_cb = NULL;
handle->recv_cb = NULL;
return 0;
}

View File

@ -1,25 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
provider uv {
probe tick__start(void* loop, int mode);
probe tick__stop(void* loop, int mode);
};

View File

@ -1,436 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#include <stdio.h>
#include <assert.h>
#include <stddef.h> /* NULL */
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
size_t uv_handle_size(uv_handle_type type) {
switch (type) {
UV_HANDLE_TYPE_MAP(XX)
default:
return -1;
}
}
size_t uv_req_size(uv_req_type type) {
switch(type) {
UV_REQ_TYPE_MAP(XX)
default:
return -1;
}
}
#undef XX
size_t uv_strlcpy(char* dst, const char* src, size_t size) {
size_t n;
if (size == 0)
return 0;
for (n = 0; n < (size - 1) && *src != '\0'; n++)
*dst++ = *src++;
*dst = '\0';
return n;
}
size_t uv_strlcat(char* dst, const char* src, size_t size) {
size_t n;
if (size == 0)
return 0;
for (n = 0; n < size && *dst != '\0'; n++, dst++);
if (n == size)
return n;
while (n < (size - 1) && *src != '\0')
n++, *dst++ = *src++;
*dst = '\0';
return n;
}
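/* Usage sketch (illustrative only): both helpers never write more than `size`
 * bytes and NUL-terminate whenever size permits, so a bounded join looks like
 * this. The buffer and inputs are assumptions made up for this example.
 */
static void example_join_path(char* buf,
                              size_t size,
                              const char* dir,
                              const char* name) {
  uv_strlcpy(buf, dir, size);
  uv_strlcat(buf, "/", size);
  uv_strlcat(buf, name, size);
}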
uv_buf_t uv_buf_init(char* base, unsigned int len) {
uv_buf_t buf;
buf.base = base;
buf.len = len;
return buf;
}
const uv_err_t uv_ok_ = { UV_OK, 0 };
#define UV_ERR_NAME_GEN(val, name, s) case UV_##name : return #name;
const char* uv_err_name(uv_err_t err) {
switch (err.code) {
UV_ERRNO_MAP(UV_ERR_NAME_GEN)
default:
assert(0);
return NULL;
}
}
#undef UV_ERR_NAME_GEN
#define UV_STRERROR_GEN(val, name, s) case UV_##name : return s;
const char* uv_strerror(uv_err_t err) {
switch (err.code) {
UV_ERRNO_MAP(UV_STRERROR_GEN)
default:
return "Unknown system error";
}
}
#undef UV_STRERROR_GEN
int uv__set_error(uv_loop_t* loop, uv_err_code code, int sys_error) {
loop->last_err.code = code;
loop->last_err.sys_errno_ = sys_error;
return -1;
}
int uv__set_sys_error(uv_loop_t* loop, int sys_error) {
loop->last_err.code = uv_translate_sys_error(sys_error);
loop->last_err.sys_errno_ = sys_error;
return -1;
}
int uv__set_artificial_error(uv_loop_t* loop, uv_err_code code) {
loop->last_err = uv__new_artificial_error(code);
return -1;
}
uv_err_t uv__new_sys_error(int sys_error) {
uv_err_t error;
error.code = uv_translate_sys_error(sys_error);
error.sys_errno_ = sys_error;
return error;
}
uv_err_t uv__new_artificial_error(uv_err_code code) {
uv_err_t error;
error.code = code;
error.sys_errno_ = 0;
return error;
}
uv_err_t uv_last_error(uv_loop_t* loop) {
return loop->last_err;
}
struct sockaddr_in uv_ip4_addr(const char* ip, int port) {
struct sockaddr_in addr;
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = inet_addr(ip);
return addr;
}
struct sockaddr_in6 uv_ip6_addr(const char* ip, int port) {
struct sockaddr_in6 addr;
memset(&addr, 0, sizeof(struct sockaddr_in6));
addr.sin6_family = AF_INET6;
addr.sin6_port = htons(port);
uv_inet_pton(AF_INET6, ip, &addr.sin6_addr);
return addr;
}
int uv_ip4_name(struct sockaddr_in* src, char* dst, size_t size) {
uv_err_t err = uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
return err.code != UV_OK;
}
int uv_ip6_name(struct sockaddr_in6* src, char* dst, size_t size) {
uv_err_t err = uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
return err.code != UV_OK;
}
int uv_tcp_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
if (handle->type != UV_TCP || addr.sin_family != AF_INET)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
else
return uv__tcp_bind(handle, addr);
}
int uv_tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
if (handle->type != UV_TCP || addr.sin6_family != AF_INET6)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
else
return uv__tcp_bind6(handle, addr);
}
int uv_udp_bind(uv_udp_t* handle,
struct sockaddr_in addr,
unsigned int flags) {
if (handle->type != UV_UDP || addr.sin_family != AF_INET)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
else
return uv__udp_bind(handle, addr, flags);
}
int uv_udp_bind6(uv_udp_t* handle,
struct sockaddr_in6 addr,
unsigned int flags) {
if (handle->type != UV_UDP || addr.sin6_family != AF_INET6)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
else
return uv__udp_bind6(handle, addr, flags);
}
int uv_tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in address,
uv_connect_cb cb) {
if (handle->type != UV_TCP || address.sin_family != AF_INET)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
else
return uv__tcp_connect(req, handle, address, cb);
}
int uv_tcp_connect6(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in6 address,
uv_connect_cb cb) {
if (handle->type != UV_TCP || address.sin6_family != AF_INET6)
return uv__set_artificial_error(handle->loop, UV_EINVAL);
else
return uv__tcp_connect6(req, handle, address, cb);
}
int uv_udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr_in addr,
uv_udp_send_cb send_cb) {
if (handle->type != UV_UDP || addr.sin_family != AF_INET) {
return uv__set_artificial_error(handle->loop, UV_EINVAL);
}
return uv__udp_send(req, handle, bufs, bufcnt, addr, send_cb);
}
int uv_udp_send6(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr_in6 addr,
uv_udp_send_cb send_cb) {
if (handle->type != UV_UDP || addr.sin6_family != AF_INET6) {
return uv__set_artificial_error(handle->loop, UV_EINVAL);
}
return uv__udp_send6(req, handle, bufs, bufcnt, addr, send_cb);
}
int uv_udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb) {
if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL) {
return uv__set_artificial_error(handle->loop, UV_EINVAL);
}
return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}
int uv_udp_recv_stop(uv_udp_t* handle) {
if (handle->type != UV_UDP) {
return uv__set_artificial_error(handle->loop, UV_EINVAL);
}
return uv__udp_recv_stop(handle);
}
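/* Usage sketch (illustrative only): a one-shot UDP send using this release's
 * by-value struct sockaddr_in API. Binding is deferred until the first send.
 * The destination, payload and names are assumptions made up for this
 * example.
 */
static void example_send_cb(uv_udp_send_t* req, int status) {
  /* status is 0 on success, -1 on error (details via uv_last_error()). */
}

static int example_udp_send(uv_loop_t* loop,
                            uv_udp_t* handle,
                            uv_udp_send_t* req) {
  uv_buf_t buf = uv_buf_init((char*) "ping", 4);
  if (uv_udp_init(loop, handle))
    return -1;
  return uv_udp_send(req, handle, &buf, 1,
                     uv_ip4_addr("127.0.0.1", 9123), example_send_cb);
}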
#ifdef _WIN32
static UINT __stdcall uv__thread_start(void *ctx_v)
#else
static void *uv__thread_start(void *ctx_v)
#endif
{
void (*entry)(void *arg);
void *arg;
struct {
void (*entry)(void *arg);
void *arg;
} *ctx;
ctx = ctx_v;
arg = ctx->arg;
entry = ctx->entry;
free(ctx);
entry(arg);
return 0;
}
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
struct {
void (*entry)(void *arg);
void *arg;
} *ctx;
if ((ctx = malloc(sizeof *ctx)) == NULL)
return -1;
ctx->entry = entry;
ctx->arg = arg;
#ifdef _WIN32
*tid = (HANDLE) _beginthreadex(NULL, 0, uv__thread_start, ctx, 0, NULL);
if (*tid == 0) {
#else
if (pthread_create(tid, NULL, uv__thread_start, ctx)) {
#endif
free(ctx);
return -1;
}
return 0;
}
unsigned long uv_thread_self(void) {
#ifdef _WIN32
return (unsigned long) GetCurrentThreadId();
#else
return (unsigned long) pthread_self();
#endif
}
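/* Usage sketch (illustrative only): the entry callback takes a single void*
 * argument and returns nothing; the heap-allocated ctx trampoline above is
 * owned and freed by uv__thread_start(), not by the caller. The names below
 * are assumptions made up for this example.
 */
static void example_thread_entry(void* arg) {
  int* value = arg;
  *value += 1;
}

static int example_run_thread(void) {
  uv_thread_t tid;
  int value = 41;
  if (uv_thread_create(&tid, example_thread_entry, &value))
    return -1;
  if (uv_thread_join(&tid))
    return -1;
  return value;  /* 42 */
}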
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
ngx_queue_t* q;
uv_handle_t* h;
ngx_queue_foreach(q, &loop->handle_queue) {
h = ngx_queue_data(q, uv_handle_t, handle_queue);
if (h->flags & UV__HANDLE_INTERNAL) continue;
walk_cb(h, arg);
}
}
#ifndef NDEBUG
static void uv__print_handles(uv_loop_t* loop, int only_active) {
const char* type;
ngx_queue_t* q;
uv_handle_t* h;
if (loop == NULL)
loop = uv_default_loop();
ngx_queue_foreach(q, &loop->handle_queue) {
h = ngx_queue_data(q, uv_handle_t, handle_queue);
if (only_active && !uv__is_active(h))
continue;
switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
UV_HANDLE_TYPE_MAP(X)
#undef X
default: type = "<unknown>";
}
fprintf(stderr,
"[%c%c%c] %-8s %p\n",
"R-"[!(h->flags & UV__HANDLE_REF)],
"A-"[!(h->flags & UV__HANDLE_ACTIVE)],
"I-"[!(h->flags & UV__HANDLE_INTERNAL)],
type,
(void*)h);
}
}
void uv_print_all_handles(uv_loop_t* loop) {
uv__print_handles(loop, 0);
}
void uv_print_active_handles(uv_loop_t* loop) {
uv__print_handles(loop, 1);
}
#endif
void uv_ref(uv_handle_t* handle) {
uv__handle_ref(handle);
}
void uv_unref(uv_handle_t* handle) {
uv__handle_unref(handle);
}
void uv_stop(uv_loop_t* loop) {
loop->stop_flag = 1;
}
uint64_t uv_now(uv_loop_t* loop) {
return loop->time;
}

View File

@ -1,207 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* This file is private to libuv. It provides common functionality to both
* Windows and Unix backends.
*/
#ifndef UV_COMMON_H_
#define UV_COMMON_H_
#include <assert.h>
#include <stddef.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv-private/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv.h"
#include "tree.h"
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define container_of(ptr, type, member) \
((type *) ((char *) (ptr) - offsetof(type, member)))
#ifdef _MSC_VER
# define UNUSED /* empty */
# define INLINE __inline
#else
# define UNUSED __attribute__((unused))
# define INLINE inline
#endif
#ifndef _WIN32
enum {
UV__HANDLE_INTERNAL = 0x8000,
UV__HANDLE_ACTIVE = 0x4000,
UV__HANDLE_REF = 0x2000,
UV__HANDLE_CLOSING = 0 /* no-op on unix */
};
#else
# define UV__HANDLE_INTERNAL 0x80
# define UV__HANDLE_ACTIVE 0x40
# define UV__HANDLE_REF 0x20
# define UV__HANDLE_CLOSING 0x01
#endif
extern const uv_err_t uv_ok_;
uv_err_code uv_translate_sys_error(int sys_errno);
int uv__set_error(uv_loop_t* loop, uv_err_code code, int sys_error);
int uv__set_sys_error(uv_loop_t* loop, int sys_error);
int uv__set_artificial_error(uv_loop_t* loop, uv_err_code code);
uv_err_t uv__new_sys_error(int sys_error);
uv_err_t uv__new_artificial_error(uv_err_code code);
int uv__tcp_bind(uv_tcp_t* handle, struct sockaddr_in addr);
int uv__tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr);
int uv__udp_bind(uv_udp_t* handle, struct sockaddr_in addr, unsigned flags);
int uv__udp_bind6(uv_udp_t* handle, struct sockaddr_in6 addr, unsigned flags);
int uv__tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in address,
uv_connect_cb cb);
int uv__tcp_connect6(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in6 address,
uv_connect_cb cb);
int uv__udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr_in addr,
uv_udp_send_cb send_cb);
int uv__udp_send6(uv_udp_send_t* req,
uv_udp_t* handle,
uv_buf_t bufs[],
int bufcnt,
struct sockaddr_in6 addr,
uv_udp_send_cb send_cb);
int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb,
uv_udp_recv_cb recv_cb);
int uv__udp_recv_stop(uv_udp_t* handle);
void uv__fs_poll_close(uv_fs_poll_t* handle);
#define uv__has_active_reqs(loop) \
(ngx_queue_empty(&(loop)->active_reqs) == 0)
#define uv__req_register(loop, req) \
do { \
ngx_queue_insert_tail(&(loop)->active_reqs, &(req)->active_queue); \
} \
while (0)
#define uv__req_unregister(loop, req) \
do { \
assert(uv__has_active_reqs(loop)); \
ngx_queue_remove(&(req)->active_queue); \
} \
while (0)
#define uv__has_active_handles(loop) \
((loop)->active_handles > 0)
#define uv__active_handle_add(h) \
do { \
(h)->loop->active_handles++; \
} \
while (0)
#define uv__active_handle_rm(h) \
do { \
(h)->loop->active_handles--; \
} \
while (0)
#define uv__is_active(h) \
(((h)->flags & UV__HANDLE_ACTIVE) != 0)
#define uv__is_closing(h) \
(((h)->flags & (UV_CLOSING | UV_CLOSED)) != 0)
#define uv__handle_start(h) \
do { \
assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \
if (((h)->flags & UV__HANDLE_ACTIVE) != 0) break; \
(h)->flags |= UV__HANDLE_ACTIVE; \
if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_add(h); \
} \
while (0)
#define uv__handle_stop(h) \
do { \
assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \
if (((h)->flags & UV__HANDLE_ACTIVE) == 0) break; \
(h)->flags &= ~UV__HANDLE_ACTIVE; \
if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_rm(h); \
} \
while (0)
#define uv__handle_ref(h) \
do { \
if (((h)->flags & UV__HANDLE_REF) != 0) break; \
(h)->flags |= UV__HANDLE_REF; \
if (((h)->flags & UV__HANDLE_CLOSING) != 0) break; \
if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_add(h); \
} \
while (0)
#define uv__handle_unref(h) \
do { \
if (((h)->flags & UV__HANDLE_REF) == 0) break; \
(h)->flags &= ~UV__HANDLE_REF; \
if (((h)->flags & UV__HANDLE_CLOSING) != 0) break; \
if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_rm(h); \
} \
while (0)
#if defined(_WIN32)
# define uv__handle_platform_init(h)
#else
# define uv__handle_platform_init(h) ((h)->next_closing = NULL)
#endif
#define uv__handle_init(loop_, h, type_) \
do { \
(h)->loop = (loop_); \
(h)->type = (type_); \
(h)->flags = UV__HANDLE_REF; /* Ref the loop when active. */ \
ngx_queue_insert_tail(&(loop_)->handle_queue, &(h)->handle_queue); \
uv__handle_platform_init(h); \
} \
while (0)
#endif /* UV_COMMON_H_ */

View File

@ -1,66 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
/*
* Versions with an even minor version (e.g. 0.6.1 or 1.0.4) are API and ABI
* stable. When the minor version is odd, the API can change between patch
* releases. Make sure you update the -soname directives in config-unix.mk
* and uv.gyp whenever you bump UV_VERSION_MAJOR or UV_VERSION_MINOR (but
* not UV_VERSION_PATCH.)
*/
#undef UV_VERSION_MAJOR /* TODO(bnoordhuis) Remove in v0.11. */
#undef UV_VERSION_MINOR /* TODO(bnoordhuis) Remove in v0.11. */
#define UV_VERSION_MAJOR 0
#define UV_VERSION_MINOR 10
#define UV_VERSION_PATCH 25
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION ((UV_VERSION_MAJOR << 16) | \
(UV_VERSION_MINOR << 8) | \
(UV_VERSION_PATCH))
#define UV_STRINGIFY(v) UV_STRINGIFY_HELPER(v)
#define UV_STRINGIFY_HELPER(v) #v
#define UV_VERSION_STRING_BASE UV_STRINGIFY(UV_VERSION_MAJOR) "." \
UV_STRINGIFY(UV_VERSION_MINOR) "." \
UV_STRINGIFY(UV_VERSION_PATCH)
#if UV_VERSION_IS_RELEASE
# define UV_VERSION_STRING UV_VERSION_STRING_BASE
#else
# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-pre"
#endif
unsigned int uv_version(void) {
return UV_VERSION;
}
const char* uv_version_string(void) {
return UV_VERSION_STRING;
}
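/* Worked example (illustrative only): for this release the packed value is
 * (0 << 16) | (10 << 8) | 25 == 0x0A19 and uv_version_string() returns
 * "0.10.25", with no "-pre" suffix because UV_VERSION_IS_RELEASE is 1. A
 * caller that needs at least the 0.10 API can test the packed value:
 */
static int example_have_v0_10(void) {
  return uv_version() >= ((0 << 16) | (10 << 8));
}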

View File

@ -1,99 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "atomicops-inl.h"
#include "handle-inl.h"
#include "req-inl.h"
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) {
if (handle->flags & UV__HANDLE_CLOSING &&
!handle->async_sent) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
}
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
uv_req_t* req;
uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC);
handle->async_sent = 0;
handle->async_cb = async_cb;
req = &handle->async_req;
uv_req_init(loop, req);
req->type = UV_WAKEUP;
req->data = handle;
uv__handle_start(handle);
return 0;
}
void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
if (!((uv_async_t*)handle)->async_sent) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
uv__handle_closing(handle);
}
int uv_async_send(uv_async_t* handle) {
uv_loop_t* loop = handle->loop;
if (handle->type != UV_ASYNC) {
/* Can't set errno because that's not thread-safe. */
return -1;
}
/* The user should make sure never to call uv_async_send() on a closing */
/* or closed handle. */
assert(!(handle->flags & UV__HANDLE_CLOSING));
if (!uv__atomic_exchange_set(&handle->async_sent)) {
POST_COMPLETION_FOR_REQ(loop, &handle->async_req);
}
return 0;
}
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
handle->async_sent = 0;
if (!(handle->flags & UV__HANDLE_CLOSING)) {
handle->async_cb((uv_async_t*) handle, 0);
} else {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
}
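/* Usage sketch (illustrative only): uv_async_send() is safe to call from any
 * thread; sends that race with an already pending wakeup are coalesced by the
 * async_sent flag, so the callback may run once for several sends. In this
 * release the callback also receives an int status argument. The names below
 * are assumptions made up for this example.
 */
static void example_async_cb(uv_async_t* handle, int status) {
  /* Runs on the loop thread after one or more uv_async_send() calls. */
}

static int example_async_init(uv_loop_t* loop, uv_async_t* handle) {
  return uv_async_init(loop, handle, example_async_cb);
}

static void example_wakeup(void* arg) {
  uv_async_send((uv_async_t*) arg);  /* Called from a worker thread. */
}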

View File

@ -1,56 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_ATOMICOPS_INL_H_
#define UV_WIN_ATOMICOPS_INL_H_
#include "uv.h"
/* Atomic set operation on char */
#ifdef _MSC_VER /* MSVC */
/* _InterlockedOr8 is supported by MSVC on x86 and x64. It is slightly less */
/* efficient than InterlockedExchange, but InterlockedExchange8 does not */
/* exist, and interlocked operations on larger targets might require the */
/* target to be aligned. */
#pragma intrinsic(_InterlockedOr8)
static char __declspec(inline) uv__atomic_exchange_set(char volatile* target) {
return _InterlockedOr8(target, 1);
}
#else /* GCC */
/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
static inline char uv__atomic_exchange_set(char volatile* target) {
const char one = 1;
char old_value;
__asm__ __volatile__ ("lock xchgb %0, %1\n\t"
: "=r"(old_value), "=m"(*target)
: "0"(one), "m"(*target)
: "memory");
return old_value;
}
#endif
#endif /* UV_WIN_ATOMICOPS_INL_H_ */

View File

@ -1,306 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* The only event loop we support right now */
static uv_loop_t uv_default_loop_;
/* uv_once initialization guards */
static uv_once_t uv_init_guard_ = UV_ONCE_INIT;
static uv_once_t uv_default_loop_init_guard_ = UV_ONCE_INIT;
static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
const wchar_t* function, const wchar_t * file, unsigned int line,
uintptr_t reserved) {
/* No-op. */
}
static void uv_init(void) {
/* Tell Windows that we will handle critical errors. */
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
SEM_NOOPENFILEERRORBOX);
/* Tell the CRT to not exit the application when an invalid parameter is */
/* passed. The main issue is that invalid FDs will trigger this behavior. */
#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
_set_invalid_parameter_handler(uv__crt_invalid_parameter_handler);
#endif
/* Fetch winapi function pointers. This must be done first because other */
/* initialization code might need these function pointers to be loaded. */
uv_winapi_init();
/* Initialize winsock */
uv_winsock_init();
/* Initialize FS */
uv_fs_init();
/* Initialize signal stuff */
uv_signals_init();
/* Initialize console */
uv_console_init();
/* Initialize utilities */
uv__util_init();
}
static void uv_loop_init(uv_loop_t* loop) {
/* Create an I/O completion port */
loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
if (loop->iocp == NULL) {
uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
}
/* To prevent uninitialized memory access, loop->time must be initialized */
/* to zero before calling uv_update_time for the first time. */
loop->time = 0;
uv_update_time(loop);
ngx_queue_init(&loop->handle_queue);
ngx_queue_init(&loop->active_reqs);
loop->active_handles = 0;
loop->pending_reqs_tail = NULL;
loop->endgame_handles = NULL;
RB_INIT(&loop->timers);
loop->check_handles = NULL;
loop->prepare_handles = NULL;
loop->idle_handles = NULL;
loop->next_prepare_handle = NULL;
loop->next_check_handle = NULL;
loop->next_idle_handle = NULL;
memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);
loop->active_tcp_streams = 0;
loop->active_udp_streams = 0;
loop->timer_counter = 0;
loop->stop_flag = 0;
loop->last_err = uv_ok_;
}
static void uv_default_loop_init(void) {
/* Initialize libuv itself first */
uv__once_init();
/* Initialize the main loop */
uv_loop_init(&uv_default_loop_);
}
void uv__once_init(void) {
uv_once(&uv_init_guard_, uv_init);
}
uv_loop_t* uv_default_loop(void) {
uv_once(&uv_default_loop_init_guard_, uv_default_loop_init);
return &uv_default_loop_;
}
uv_loop_t* uv_loop_new(void) {
uv_loop_t* loop;
/* Initialize libuv itself first */
uv__once_init();
loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
if (!loop) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
uv_loop_init(loop);
return loop;
}
void uv_loop_delete(uv_loop_t* loop) {
if (loop != &uv_default_loop_) {
int i;
for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) {
SOCKET sock = loop->poll_peer_sockets[i];
if (sock != 0 && sock != INVALID_SOCKET) {
closesocket(sock);
}
}
free(loop);
}
}
int uv_backend_fd(const uv_loop_t* loop) {
return -1;
}
int uv_backend_timeout(const uv_loop_t* loop) {
return 0;
}
static void uv_poll(uv_loop_t* loop, int block) {
DWORD bytes, timeout;
ULONG_PTR key;
OVERLAPPED* overlapped;
uv_req_t* req;
if (block) {
timeout = uv_get_poll_timeout(loop);
} else {
timeout = 0;
}
GetQueuedCompletionStatus(loop->iocp,
&bytes,
&key,
&overlapped,
timeout);
if (overlapped) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlapped);
uv_insert_pending_req(loop, req);
} else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
}
}
static void uv_poll_ex(uv_loop_t* loop, int block) {
BOOL success;
DWORD timeout;
uv_req_t* req;
OVERLAPPED_ENTRY overlappeds[128];
ULONG count;
ULONG i;
if (block) {
timeout = uv_get_poll_timeout(loop);
} else {
timeout = 0;
}
success = pGetQueuedCompletionStatusEx(loop->iocp,
overlappeds,
ARRAY_SIZE(overlappeds),
&count,
timeout,
FALSE);
if (success) {
for (i = 0; i < count; i++) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
uv_insert_pending_req(loop, req);
}
} else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
}
}
static int uv__loop_alive(uv_loop_t* loop) {
return loop->active_handles > 0 ||
!ngx_queue_empty(&loop->active_reqs) ||
loop->endgame_handles != NULL;
}
int uv_run(uv_loop_t *loop, uv_run_mode mode) {
int r;
void (*poll)(uv_loop_t* loop, int block);
if (pGetQueuedCompletionStatusEx)
poll = &uv_poll_ex;
else
poll = &uv_poll;
if (!uv__loop_alive(loop))
return 0;
r = uv__loop_alive(loop);
while (r != 0 && loop->stop_flag == 0) {
uv_update_time(loop);
uv_process_timers(loop);
/* Call idle callbacks if nothing to do. */
if (loop->pending_reqs_tail == NULL &&
loop->endgame_handles == NULL) {
uv_idle_invoke(loop);
}
uv_process_reqs(loop);
uv_process_endgames(loop);
uv_prepare_invoke(loop);
(*poll)(loop, loop->idle_handles == NULL &&
loop->pending_reqs_tail == NULL &&
loop->endgame_handles == NULL &&
!loop->stop_flag &&
(loop->active_handles > 0 ||
!ngx_queue_empty(&loop->active_reqs)) &&
!(mode & UV_RUN_NOWAIT));
uv_check_invoke(loop);
r = uv__loop_alive(loop);
if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
break;
}
/* The if statement lets the compiler compile it to a conditional store.
* Avoids dirtying a cache line.
*/
if (loop->stop_flag != 0)
loop->stop_flag = 0;
return r;
}
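The run loop above is normally driven through the public entry points it defines. A minimal sketch of that usage (pre-1.0 API as declared in this tree; nothing below is specific to the removed file):
#include <stdio.h>
#include "uv.h"
int main(void) {
  uv_loop_t* loop = uv_default_loop();  /* lazily initialized via uv_once */
  /* With no active handles or requests uv__loop_alive() is false,
   * so uv_run() returns immediately with 0. */
  int alive = uv_run(loop, UV_RUN_DEFAULT);
  printf("loop alive after run: %d\n", alive);
  /* uv_loop_delete() only frees non-default loops in this version. */
  uv_loop_delete(loop);
  return 0;
}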

View File

@ -1,86 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
static int uv__dlerror(uv_lib_t* lib, int errorno);
int uv_dlopen(const char* filename, uv_lib_t* lib) {
WCHAR filename_w[32768];
lib->handle = NULL;
lib->errmsg = NULL;
if (!uv_utf8_to_utf16(filename, filename_w, ARRAY_SIZE(filename_w))) {
return uv__dlerror(lib, GetLastError());
}
lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
if (lib->handle == NULL) {
return uv__dlerror(lib, GetLastError());
}
return 0;
}
void uv_dlclose(uv_lib_t* lib) {
if (lib->errmsg) {
LocalFree((void*)lib->errmsg);
lib->errmsg = NULL;
}
if (lib->handle) {
/* Ignore errors. No good way to signal them without leaking memory. */
FreeLibrary(lib->handle);
lib->handle = NULL;
}
}
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
*ptr = (void*) GetProcAddress(lib->handle, name);
return uv__dlerror(lib, *ptr ? 0 : GetLastError());
}
const char* uv_dlerror(uv_lib_t* lib) {
return lib->errmsg ? lib->errmsg : "no error";
}
static int uv__dlerror(uv_lib_t* lib, int errorno) {
if (lib->errmsg) {
LocalFree((void*)lib->errmsg);
lib->errmsg = NULL;
}
if (errorno) {
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
(LPSTR)&lib->errmsg, 0, NULL);
}
return errorno ? -1 : 0;
}
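uv_dlopen/uv_dlsym/uv_dlclose wrap LoadLibraryExW, GetProcAddress and FreeLibrary, with uv_dlerror exposing the FormatMessage text. A minimal usage sketch; the library and symbol names are placeholders for illustration:
#include <stdio.h>
#include "uv.h"
int main(void) {
  uv_lib_t lib;
  void (*entry)(void);
  /* "plugin.dll" / "plugin_entry" are hypothetical names. */
  if (uv_dlopen("plugin.dll", &lib) != 0) {
    fprintf(stderr, "dlopen: %s\n", uv_dlerror(&lib));
    return 1;
  }
  if (uv_dlsym(&lib, "plugin_entry", (void**) &entry) == 0)
    entry();
  else
    fprintf(stderr, "dlsym: %s\n", uv_dlerror(&lib));
  uv_dlclose(&lib);
  return 0;
}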

View File

@ -1,164 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
/*
* Display an error message and abort the event loop.
*/
void uv_fatal_error(const int errorno, const char* syscall) {
char* buf = NULL;
const char* errmsg;
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
if (buf) {
errmsg = buf;
} else {
errmsg = "Unknown error";
}
/* FormatMessage messages include a newline character already, */
/* so don't add another. */
if (syscall) {
fprintf(stderr, "%s: (%d) %s", syscall, errorno, errmsg);
} else {
fprintf(stderr, "(%d) %s", errorno, errmsg);
}
if (buf) {
LocalFree(buf);
}
*((char*)NULL) = 0xff; /* Force debug break */
abort();
}
uv_err_code uv_translate_sys_error(int sys_errno) {
switch (sys_errno) {
case ERROR_SUCCESS: return UV_OK;
case ERROR_NOACCESS: return UV_EACCES;
case WSAEACCES: return UV_EACCES;
case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
case WSAEADDRINUSE: return UV_EADDRINUSE;
case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
case WSAEAFNOSUPPORT: return UV_EAFNOSUPPORT;
case WSAEWOULDBLOCK: return UV_EAGAIN;
case WSAEALREADY: return UV_EALREADY;
case ERROR_INVALID_FLAGS: return UV_EBADF;
case ERROR_INVALID_HANDLE: return UV_EBADF;
case ERROR_LOCK_VIOLATION: return UV_EBUSY;
case ERROR_PIPE_BUSY: return UV_EBUSY;
case ERROR_SHARING_VIOLATION: return UV_EBUSY;
case ERROR_OPERATION_ABORTED: return UV_ECANCELED;
case WSAEINTR: return UV_ECANCELED;
case ERROR_NO_UNICODE_TRANSLATION: return UV_ECHARSET;
case ERROR_CONNECTION_ABORTED: return UV_ECONNABORTED;
case WSAECONNABORTED: return UV_ECONNABORTED;
case ERROR_CONNECTION_REFUSED: return UV_ECONNREFUSED;
case WSAECONNREFUSED: return UV_ECONNREFUSED;
case ERROR_NETNAME_DELETED: return UV_ECONNRESET;
case WSAECONNRESET: return UV_ECONNRESET;
case ERROR_ALREADY_EXISTS: return UV_EEXIST;
case ERROR_FILE_EXISTS: return UV_EEXIST;
case ERROR_BUFFER_OVERFLOW: return UV_EFAULT;
case WSAEFAULT: return UV_EFAULT;
case ERROR_HOST_UNREACHABLE: return UV_EHOSTUNREACH;
case WSAEHOSTUNREACH: return UV_EHOSTUNREACH;
case ERROR_INSUFFICIENT_BUFFER: return UV_EINVAL;
case ERROR_INVALID_DATA: return UV_EINVAL;
case ERROR_INVALID_PARAMETER: return UV_EINVAL;
case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL;
case WSAEINVAL: return UV_EINVAL;
case WSAEPFNOSUPPORT: return UV_EINVAL;
case WSAESOCKTNOSUPPORT: return UV_EINVAL;
case ERROR_BEGINNING_OF_MEDIA: return UV_EIO;
case ERROR_BUS_RESET: return UV_EIO;
case ERROR_CRC: return UV_EIO;
case ERROR_DEVICE_DOOR_OPEN: return UV_EIO;
case ERROR_DEVICE_REQUIRES_CLEANING: return UV_EIO;
case ERROR_DISK_CORRUPT: return UV_EIO;
case ERROR_EOM_OVERFLOW: return UV_EIO;
case ERROR_FILEMARK_DETECTED: return UV_EIO;
case ERROR_GEN_FAILURE: return UV_EIO;
case ERROR_INVALID_BLOCK_LENGTH: return UV_EIO;
case ERROR_IO_DEVICE: return UV_EIO;
case ERROR_NO_DATA_DETECTED: return UV_EIO;
case ERROR_NO_SIGNAL_SENT: return UV_EIO;
case ERROR_OPEN_FAILED: return UV_EIO;
case ERROR_SETMARK_DETECTED: return UV_EIO;
case ERROR_SIGNAL_REFUSED: return UV_EIO;
case ERROR_CANT_RESOLVE_FILENAME: return UV_ELOOP;
case ERROR_TOO_MANY_OPEN_FILES: return UV_EMFILE;
case WSAEMFILE: return UV_EMFILE;
case WSAEMSGSIZE: return UV_EMSGSIZE;
case ERROR_FILENAME_EXCED_RANGE: return UV_ENAMETOOLONG;
case ERROR_NETWORK_UNREACHABLE: return UV_ENETUNREACH;
case WSAENETUNREACH: return UV_ENETUNREACH;
case WSAENOBUFS: return UV_ENOBUFS;
case ERROR_DIRECTORY: return UV_ENOENT;
case ERROR_FILE_NOT_FOUND: return UV_ENOENT;
case ERROR_INVALID_NAME: return UV_ENOENT;
case ERROR_INVALID_REPARSE_DATA: return UV_ENOENT;
case ERROR_MOD_NOT_FOUND: return UV_ENOENT;
case ERROR_PATH_NOT_FOUND: return UV_ENOENT;
case WSAHOST_NOT_FOUND: return UV_ENOENT;
case WSANO_DATA: return UV_ENOENT;
case ERROR_NOT_ENOUGH_MEMORY: return UV_ENOMEM;
case ERROR_OUTOFMEMORY: return UV_ENOMEM;
case ERROR_CANNOT_MAKE: return UV_ENOSPC;
case ERROR_DISK_FULL: return UV_ENOSPC;
case ERROR_EA_TABLE_FULL: return UV_ENOSPC;
case ERROR_END_OF_MEDIA: return UV_ENOSPC;
case ERROR_HANDLE_DISK_FULL: return UV_ENOSPC;
case ERROR_NOT_CONNECTED: return UV_ENOTCONN;
case WSAENOTCONN: return UV_ENOTCONN;
case ERROR_DIR_NOT_EMPTY: return UV_ENOTEMPTY;
case WSAENOTSOCK: return UV_ENOTSOCK;
case ERROR_NOT_SUPPORTED: return UV_ENOTSUP;
case ERROR_BROKEN_PIPE: return UV_EOF;
case ERROR_ACCESS_DENIED: return UV_EPERM;
case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM;
case ERROR_BAD_PIPE: return UV_EPIPE;
case ERROR_NO_DATA: return UV_EPIPE;
case ERROR_PIPE_NOT_CONNECTED: return UV_EPIPE;
case WSAESHUTDOWN: return UV_EPIPE;
case WSAEPROTONOSUPPORT: return UV_EPROTONOSUPPORT;
case ERROR_WRITE_PROTECT: return UV_EROFS;
case ERROR_SEM_TIMEOUT: return UV_ETIMEDOUT;
case WSAETIMEDOUT: return UV_ETIMEDOUT;
case ERROR_NOT_SAME_DEVICE: return UV_EXDEV;
case ERROR_INVALID_FUNCTION: return UV_EISDIR;
default: return UV_UNKNOWN;
}
}
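The table above folds both Win32 and Winsock codes onto libuv's portable uv_err_code values; for example ERROR_ADDRESS_ALREADY_ASSOCIATED and WSAEADDRINUSE both become UV_EADDRINUSE. A small sketch of exercising the mapping (assumes it is compiled inside this source tree, where internal.h declares uv_translate_sys_error):
#include <stdio.h>
#include "uv.h"
#include "internal.h"
int main(void) {
  /* ERROR_FILE_NOT_FOUND and WSAHOST_NOT_FOUND both map to UV_ENOENT. */
  printf("%d %d %d\n",
         uv_translate_sys_error(ERROR_FILE_NOT_FOUND),
         uv_translate_sys_error(WSAHOST_NOT_FOUND),
         UV_ENOENT);
  return 0;
}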

View File

@ -1,507 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <malloc.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
const unsigned int uv_directory_watcher_buffer_size = 4096;
static void uv_fs_event_init_handle(uv_loop_t* loop, uv_fs_event_t* handle,
const char* filename, uv_fs_event_cb cb) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_FS_EVENT);
handle->cb = cb;
handle->dir_handle = INVALID_HANDLE_VALUE;
handle->buffer = NULL;
handle->req_pending = 0;
handle->filew = NULL;
handle->short_filew = NULL;
handle->dirw = NULL;
uv_req_init(loop, (uv_req_t*)&handle->req);
handle->req.type = UV_FS_EVENT_REQ;
handle->req.data = (void*)handle;
handle->filename = strdup(filename);
if (!handle->filename) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
uv__handle_start(handle);
}
static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
uv_fs_event_t* handle) {
assert(handle->dir_handle != INVALID_HANDLE_VALUE);
assert(!handle->req_pending);
memset(&(handle->req.overlapped), 0, sizeof(handle->req.overlapped));
if (!ReadDirectoryChangesW(handle->dir_handle,
handle->buffer,
uv_directory_watcher_buffer_size,
FALSE,
FILE_NOTIFY_CHANGE_FILE_NAME |
FILE_NOTIFY_CHANGE_DIR_NAME |
FILE_NOTIFY_CHANGE_ATTRIBUTES |
FILE_NOTIFY_CHANGE_SIZE |
FILE_NOTIFY_CHANGE_LAST_WRITE |
FILE_NOTIFY_CHANGE_LAST_ACCESS |
FILE_NOTIFY_CHANGE_CREATION |
FILE_NOTIFY_CHANGE_SECURITY,
NULL,
&handle->req.overlapped,
NULL)) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(&handle->req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)&handle->req);
}
handle->req_pending = 1;
}
static int uv_split_path(const WCHAR* filename, WCHAR** dir,
WCHAR** file) {
int len = wcslen(filename);
int i = len;
while (i > 0 && filename[--i] != '\\' && filename[i] != '/');
if (i == 0) {
if (dir) {
*dir = (WCHAR*)malloc((MAX_PATH + 1) * sizeof(WCHAR));
if (!*dir) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
if (!GetCurrentDirectoryW(MAX_PATH, *dir)) {
free(*dir);
*dir = NULL;
return -1;
}
}
*file = wcsdup(filename);
} else {
if (dir) {
*dir = (WCHAR*)malloc((i + 1) * sizeof(WCHAR));
if (!*dir) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
wcsncpy(*dir, filename, i);
(*dir)[i] = L'\0';
}
*file = (WCHAR*)malloc((len - i) * sizeof(WCHAR));
if (!*file) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
wcsncpy(*file, filename + i + 1, len - i - 1);
(*file)[len - i - 1] = L'\0';
}
return 0;
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle,
const char* filename, uv_fs_event_cb cb, int flags) {
int name_size, is_path_dir;
DWORD attr, last_error;
WCHAR* dir = NULL, *dir_to_watch, *filenamew = NULL;
WCHAR short_path[MAX_PATH];
uv_fs_event_init_handle(loop, handle, filename, cb);
/* Convert name to UTF16. */
name_size = uv_utf8_to_utf16(filename, NULL, 0) * sizeof(WCHAR);
filenamew = (WCHAR*)malloc(name_size);
if (!filenamew) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
if (!uv_utf8_to_utf16(filename, filenamew,
name_size / sizeof(WCHAR))) {
uv__set_sys_error(loop, GetLastError());
return -1;
}
/* Determine whether filename is a file or a directory. */
attr = GetFileAttributesW(filenamew);
if (attr == INVALID_FILE_ATTRIBUTES) {
last_error = GetLastError();
goto error;
}
is_path_dir = (attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0;
if (is_path_dir) {
/* filename is a directory, so that's the directory that we will watch. */
handle->dirw = filenamew;
dir_to_watch = filenamew;
} else {
/*
* filename is a file. So we split filename into dir & file parts, and
* watch the dir directory.
*/
/* Convert to short path. */
if (!GetShortPathNameW(filenamew, short_path, ARRAY_SIZE(short_path))) {
last_error = GetLastError();
goto error;
}
if (uv_split_path(filenamew, &dir, &handle->filew) != 0) {
last_error = GetLastError();
goto error;
}
if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) {
last_error = GetLastError();
goto error;
}
dir_to_watch = dir;
free(filenamew);
filenamew = NULL;
}
handle->dir_handle = CreateFileW(dir_to_watch,
FILE_LIST_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_DELETE |
FILE_SHARE_WRITE,
NULL,
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS |
FILE_FLAG_OVERLAPPED,
NULL);
if (dir) {
free(dir);
dir = NULL;
}
if (handle->dir_handle == INVALID_HANDLE_VALUE) {
last_error = GetLastError();
goto error;
}
if (CreateIoCompletionPort(handle->dir_handle,
loop->iocp,
(ULONG_PTR)handle,
0) == NULL) {
last_error = GetLastError();
goto error;
}
handle->buffer = (char*)_aligned_malloc(uv_directory_watcher_buffer_size,
sizeof(DWORD));
if (!handle->buffer) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
memset(&(handle->req.overlapped), 0, sizeof(handle->req.overlapped));
if (!ReadDirectoryChangesW(handle->dir_handle,
handle->buffer,
uv_directory_watcher_buffer_size,
FALSE,
FILE_NOTIFY_CHANGE_FILE_NAME |
FILE_NOTIFY_CHANGE_DIR_NAME |
FILE_NOTIFY_CHANGE_ATTRIBUTES |
FILE_NOTIFY_CHANGE_SIZE |
FILE_NOTIFY_CHANGE_LAST_WRITE |
FILE_NOTIFY_CHANGE_LAST_ACCESS |
FILE_NOTIFY_CHANGE_CREATION |
FILE_NOTIFY_CHANGE_SECURITY,
NULL,
&handle->req.overlapped,
NULL)) {
last_error = GetLastError();
goto error;
}
handle->req_pending = 1;
return 0;
error:
if (handle->filename) {
free(handle->filename);
handle->filename = NULL;
}
if (handle->filew) {
free(handle->filew);
handle->filew = NULL;
}
if (handle->short_filew) {
free(handle->short_filew);
handle->short_filew = NULL;
}
free(filenamew);
if (handle->dir_handle != INVALID_HANDLE_VALUE) {
CloseHandle(handle->dir_handle);
handle->dir_handle = INVALID_HANDLE_VALUE;
}
if (handle->buffer) {
_aligned_free(handle->buffer);
handle->buffer = NULL;
}
uv__set_sys_error(loop, last_error);
return -1;
}
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle) {
FILE_NOTIFY_INFORMATION* file_info;
int sizew, size, result;
char* filename = NULL;
WCHAR* filenamew, *long_filenamew = NULL;
DWORD offset = 0;
assert(req->type == UV_FS_EVENT_REQ);
assert(handle->req_pending);
handle->req_pending = 0;
/* If we're closing, don't report any callbacks, and just push the handle */
/* onto the endgame queue. */
if (handle->flags & UV__HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
return;
};
file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset);
if (REQ_SUCCESS(req)) {
if (req->overlapped.InternalHigh > 0) {
do {
file_info = (FILE_NOTIFY_INFORMATION*)((char*)file_info + offset);
assert(!filename);
assert(!long_filenamew);
/*
* Fire the event only if we were asked to watch a directory,
* or if the filename filter matches.
*/
if (handle->dirw ||
_wcsnicmp(handle->filew, file_info->FileName,
file_info->FileNameLength / sizeof(WCHAR)) == 0 ||
_wcsnicmp(handle->short_filew, file_info->FileName,
file_info->FileNameLength / sizeof(WCHAR)) == 0) {
if (handle->dirw) {
/*
* We attempt to convert the file name to its long form for
* events that still point to valid files on disk.
* For removed and renamed events, we do not provide the file name.
*/
if (file_info->Action != FILE_ACTION_REMOVED &&
file_info->Action != FILE_ACTION_RENAMED_OLD_NAME) {
/* Construct a full path to the file. */
size = wcslen(handle->dirw) +
file_info->FileNameLength / sizeof(WCHAR) + 2;
filenamew = (WCHAR*)malloc(size * sizeof(WCHAR));
if (!filenamew) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
_snwprintf(filenamew, size, L"%s\\%.*s", handle->dirw,
file_info->FileNameLength / sizeof(WCHAR),
file_info->FileName);
filenamew[size - 1] = L'\0';
/* Convert to long name. */
size = GetLongPathNameW(filenamew, NULL, 0);
if (size) {
long_filenamew = (WCHAR*)malloc(size * sizeof(WCHAR));
if (!long_filenamew) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
size = GetLongPathNameW(filenamew, long_filenamew, size);
if (size) {
long_filenamew[size] = '\0';
} else {
free(long_filenamew);
long_filenamew = NULL;
}
}
free(filenamew);
if (long_filenamew) {
/* Get the file name out of the long path. */
result = uv_split_path(long_filenamew, NULL, &filenamew);
free(long_filenamew);
if (result == 0) {
long_filenamew = filenamew;
sizew = -1;
} else {
long_filenamew = NULL;
}
}
/*
* If we couldn't get the long name - just use the name
* provided by ReadDirectoryChangesW.
*/
if (!long_filenamew) {
filenamew = file_info->FileName;
sizew = file_info->FileNameLength / sizeof(WCHAR);
}
} else {
/* Removed or renamed callbacks don't provide filename. */
filenamew = NULL;
}
} else {
/* We already have the long name of the file, so just use it. */
filenamew = handle->filew;
sizew = -1;
}
if (filenamew) {
/* Convert the filename to utf8. */
size = uv_utf16_to_utf8(filenamew,
sizew,
NULL,
0);
if (size) {
filename = (char*)malloc(size + 1);
if (!filename) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
size = uv_utf16_to_utf8(filenamew,
sizew,
filename,
size);
if (size) {
filename[size] = '\0';
} else {
free(filename);
filename = NULL;
}
}
}
switch (file_info->Action) {
case FILE_ACTION_ADDED:
case FILE_ACTION_REMOVED:
case FILE_ACTION_RENAMED_OLD_NAME:
case FILE_ACTION_RENAMED_NEW_NAME:
handle->cb(handle, filename, UV_RENAME, 0);
break;
case FILE_ACTION_MODIFIED:
handle->cb(handle, filename, UV_CHANGE, 0);
break;
}
free(filename);
filename = NULL;
free(long_filenamew);
long_filenamew = NULL;
}
offset = file_info->NextEntryOffset;
} while (offset && !(handle->flags & UV__HANDLE_CLOSING));
} else {
handle->cb(handle, NULL, UV_CHANGE, 0);
}
} else {
uv__set_sys_error(loop, GET_REQ_ERROR(req));
handle->cb(handle, NULL, 0, -1);
}
if (!(handle->flags & UV__HANDLE_CLOSING)) {
uv_fs_event_queue_readdirchanges(loop, handle);
} else {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
if (handle->dir_handle != INVALID_HANDLE_VALUE) {
CloseHandle(handle->dir_handle);
handle->dir_handle = INVALID_HANDLE_VALUE;
}
if (!handle->req_pending) {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
uv__handle_closing(handle);
}
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
if (handle->flags & UV__HANDLE_CLOSING &&
!handle->req_pending) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
if (handle->buffer) {
_aligned_free(handle->buffer);
handle->buffer = NULL;
}
if (handle->filew) {
free(handle->filew);
handle->filew = NULL;
}
if (handle->short_filew) {
free(handle->short_filew);
handle->short_filew = NULL;
}
if (handle->filename) {
free(handle->filename);
handle->filename = NULL;
}
if (handle->dirw) {
free(handle->dirw);
handle->dirw = NULL;
}
uv__handle_close(handle);
}
}
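From the caller's side, the watcher above is driven through uv_fs_event_init and the callback it invokes with UV_RENAME / UV_CHANGE (or status -1 on error). A minimal sketch of watching the current directory, using the pre-1.0 signatures shown above (hypothetical example):
#include <stdio.h>
#include "uv.h"
static void on_change(uv_fs_event_t* handle, const char* filename,
                      int events, int status) {
  if (status == -1) {
    fprintf(stderr, "watch error\n");
    return;
  }
  printf("%s: %s\n",
         events == UV_RENAME ? "rename" : "change",
         filename != NULL ? filename : "(no name)");
}
int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_fs_event_t watcher;
  /* Watching a directory ("."); per the code above, a directory watch
   * reports every entry in the ReadDirectoryChangesW buffer. */
  if (uv_fs_event_init(loop, &watcher, ".", on_change, 0) != 0) {
    fprintf(stderr, "uv_fs_event_init failed\n");
    return 1;
  }
  return uv_run(loop, UV_RUN_DEFAULT);
}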

File diff suppressed because it is too large.

View File

@ -1,365 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <malloc.h>
#include "uv.h"
#include "internal.h"
#include "req-inl.h"
/*
* MinGW is missing this
*/
#if !defined(_MSC_VER) && !defined(__MINGW64_VERSION_MAJOR)
typedef struct addrinfoW {
int ai_flags;
int ai_family;
int ai_socktype;
int ai_protocol;
size_t ai_addrlen;
WCHAR* ai_canonname;
struct sockaddr* ai_addr;
struct addrinfoW* ai_next;
} ADDRINFOW, *PADDRINFOW;
DECLSPEC_IMPORT int WSAAPI GetAddrInfoW(const WCHAR* node,
const WCHAR* service,
const ADDRINFOW* hints,
PADDRINFOW* result);
DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo);
#endif
/* Adjust size value to be a multiple of 4. Used to keep pointers aligned. */
/* Do we need different versions of this for different architectures? */
#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2)
/*
* getaddrinfo error code mapping
* Falls back to uv_translate_sys_error if no match
*/
static uv_err_code uv_translate_eai_error(int eai_errno) {
switch (eai_errno) {
case ERROR_SUCCESS: return UV_OK;
case EAI_BADFLAGS: return UV_EBADF;
case EAI_FAIL: return UV_EFAULT;
case EAI_FAMILY: return UV_EAIFAMNOSUPPORT;
case EAI_MEMORY: return UV_ENOMEM;
case EAI_NONAME: return UV_ENOENT;
case EAI_AGAIN: return UV_EAGAIN;
case EAI_SERVICE: return UV_EAISERVICE;
case EAI_SOCKTYPE: return UV_EAISOCKTYPE;
default: return uv_translate_sys_error(eai_errno);
}
}
/* getaddrinfo worker thread implementation */
static DWORD WINAPI getaddrinfo_thread_proc(void* parameter) {
uv_getaddrinfo_t* req = (uv_getaddrinfo_t*) parameter;
uv_loop_t* loop = req->loop;
int ret;
assert(req != NULL);
/* call OS function on this thread */
ret = GetAddrInfoW(req->node,
req->service,
req->hints,
&req->res);
req->retcode = ret;
/* post getaddrinfo completed */
POST_COMPLETION_FOR_REQ(loop, req);
return 0;
}
/*
* Called from uv_run when complete. Call user specified callback
* then free returned addrinfo
* Returned addrinfo strings are converted from UTF-16 to UTF-8.
*
* To minimize allocation we calculate total size required,
* and copy all structs and referenced strings into the one block.
* Each size calculation is adjusted to avoid unaligned pointers.
*/
void uv_process_getaddrinfo_req(uv_loop_t* loop, uv_getaddrinfo_t* req) {
int addrinfo_len = 0;
int name_len = 0;
size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo));
struct addrinfoW* addrinfow_ptr;
struct addrinfo* addrinfo_ptr;
char* alloc_ptr = NULL;
char* cur_ptr = NULL;
int status = 0;
/* release input parameter memory */
if (req->alloc != NULL) {
free(req->alloc);
req->alloc = NULL;
}
if (req->retcode == 0) {
/* convert addrinfoW to addrinfo */
/* first calculate required length */
addrinfow_ptr = req->res;
while (addrinfow_ptr != NULL) {
addrinfo_len += addrinfo_struct_len +
ALIGNED_SIZE(addrinfow_ptr->ai_addrlen);
if (addrinfow_ptr->ai_canonname != NULL) {
name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname, -1, NULL, 0);
if (name_len == 0) {
uv__set_sys_error(loop, GetLastError());
status = -1;
goto complete;
}
addrinfo_len += ALIGNED_SIZE(name_len);
}
addrinfow_ptr = addrinfow_ptr->ai_next;
}
/* allocate memory for addrinfo results */
alloc_ptr = (char*)malloc(addrinfo_len);
/* do conversions */
if (alloc_ptr != NULL) {
cur_ptr = alloc_ptr;
addrinfow_ptr = req->res;
while (addrinfow_ptr != NULL) {
/* copy addrinfo struct data */
assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len);
addrinfo_ptr = (struct addrinfo*)cur_ptr;
addrinfo_ptr->ai_family = addrinfow_ptr->ai_family;
addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype;
addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol;
addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags;
addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen;
addrinfo_ptr->ai_canonname = NULL;
addrinfo_ptr->ai_addr = NULL;
addrinfo_ptr->ai_next = NULL;
cur_ptr += addrinfo_struct_len;
/* copy sockaddr */
if (addrinfo_ptr->ai_addrlen > 0) {
assert(cur_ptr + addrinfo_ptr->ai_addrlen <=
alloc_ptr + addrinfo_len);
memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen);
addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr;
cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen);
}
/* convert canonical name to UTF-8 */
if (addrinfow_ptr->ai_canonname != NULL) {
name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname,
-1,
NULL,
0);
assert(name_len > 0);
assert(cur_ptr + name_len <= alloc_ptr + addrinfo_len);
name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname,
-1,
cur_ptr,
name_len);
assert(name_len > 0);
addrinfo_ptr->ai_canonname = cur_ptr;
cur_ptr += ALIGNED_SIZE(name_len);
}
assert(cur_ptr <= alloc_ptr + addrinfo_len);
/* set next ptr */
addrinfow_ptr = addrinfow_ptr->ai_next;
if (addrinfow_ptr != NULL) {
addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr;
}
}
} else {
uv__set_artificial_error(loop, UV_ENOMEM);
status = -1;
}
} else {
/* GetAddrInfo failed */
uv__set_artificial_error(loop, uv_translate_eai_error(req->retcode));
status = -1;
}
/* return memory to system */
if (req->res != NULL) {
FreeAddrInfoW(req->res);
req->res = NULL;
}
complete:
uv__req_unregister(loop, req);
/* finally do callback with converted result */
req->getaddrinfo_cb(req, status, (struct addrinfo*)alloc_ptr);
}
void uv_freeaddrinfo(struct addrinfo* ai) {
char* alloc_ptr = (char*)ai;
/* release copied result memory */
if (alloc_ptr != NULL) {
free(alloc_ptr);
}
}
/*
* Entry point for getaddrinfo
* we convert the UTF-8 strings to UNICODE
* and save the UNICODE string pointers in the req
* We also copy hints so that caller does not need to keep memory until the
* callback.
* return UV_OK if a callback will be made
* return error code if validation fails
*
* To minimize allocation we calculate total size required,
* and copy all structs and referenced strings into the one block.
* Each size calculation is adjusted to avoid unaligned pointers.
*/
int uv_getaddrinfo(uv_loop_t* loop,
uv_getaddrinfo_t* req,
uv_getaddrinfo_cb getaddrinfo_cb,
const char* node,
const char* service,
const struct addrinfo* hints) {
int nodesize = 0;
int servicesize = 0;
int hintssize = 0;
char* alloc_ptr = NULL;
if (req == NULL || getaddrinfo_cb == NULL ||
(node == NULL && service == NULL)) {
uv__set_sys_error(loop, WSAEINVAL);
goto error;
}
uv_req_init(loop, (uv_req_t*)req);
req->getaddrinfo_cb = getaddrinfo_cb;
req->res = NULL;
req->type = UV_GETADDRINFO;
req->loop = loop;
/* calculate required memory size for all input values */
if (node != NULL) {
nodesize = ALIGNED_SIZE(uv_utf8_to_utf16(node, NULL, 0) * sizeof(WCHAR));
if (nodesize == 0) {
uv__set_sys_error(loop, GetLastError());
goto error;
}
}
if (service != NULL) {
servicesize = ALIGNED_SIZE(uv_utf8_to_utf16(service, NULL, 0) *
sizeof(WCHAR));
if (servicesize == 0) {
uv__set_sys_error(loop, GetLastError());
goto error;
}
}
if (hints != NULL) {
hintssize = ALIGNED_SIZE(sizeof(struct addrinfoW));
}
/* allocate memory for inputs, and partition it as needed */
alloc_ptr = (char*)malloc(nodesize + servicesize + hintssize);
if (!alloc_ptr) {
uv__set_sys_error(loop, WSAENOBUFS);
goto error;
}
/* save alloc_ptr now so we can free if error */
req->alloc = (void*)alloc_ptr;
/* convert node string to UTF16 into allocated memory and save pointer in */
/* the request. */
if (node != NULL) {
req->node = (WCHAR*)alloc_ptr;
if (uv_utf8_to_utf16(node,
(WCHAR*) alloc_ptr,
nodesize / sizeof(WCHAR)) == 0) {
uv__set_sys_error(loop, GetLastError());
goto error;
}
alloc_ptr += nodesize;
} else {
req->node = NULL;
}
/* convert service string to UTF16 into allocated memory and save pointer */
/* in the req. */
if (service != NULL) {
req->service = (WCHAR*)alloc_ptr;
if (uv_utf8_to_utf16(service,
(WCHAR*) alloc_ptr,
servicesize / sizeof(WCHAR)) == 0) {
uv__set_sys_error(loop, GetLastError());
goto error;
}
alloc_ptr += servicesize;
} else {
req->service = NULL;
}
/* copy hints to allocated memory and save pointer in req */
if (hints != NULL) {
req->hints = (struct addrinfoW*)alloc_ptr;
req->hints->ai_family = hints->ai_family;
req->hints->ai_socktype = hints->ai_socktype;
req->hints->ai_protocol = hints->ai_protocol;
req->hints->ai_flags = hints->ai_flags;
req->hints->ai_addrlen = 0;
req->hints->ai_canonname = NULL;
req->hints->ai_addr = NULL;
req->hints->ai_next = NULL;
} else {
req->hints = NULL;
}
/* Ask thread to run. Treat this as a long operation */
if (QueueUserWorkItem(&getaddrinfo_thread_proc,
req,
WT_EXECUTELONGFUNCTION) == 0) {
uv__set_sys_error(loop, GetLastError());
goto error;
}
uv__req_register(loop, req);
return 0;
error:
if (req != NULL && req->alloc != NULL) {
free(req->alloc);
}
return -1;
}
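The request above is created by the public uv_getaddrinfo call and completes on the loop thread with a single malloc'd addrinfo block that the callback must release with uv_freeaddrinfo. A minimal usage sketch; the host and service values are arbitrary:
#include <stdio.h>
#include "uv.h"
static void on_resolved(uv_getaddrinfo_t* req, int status,
                        struct addrinfo* res) {
  struct addrinfo* ai;
  if (status == 0)
    for (ai = res; ai != NULL; ai = ai->ai_next)
      printf("family=%d socktype=%d\n", ai->ai_family, ai->ai_socktype);
  /* All results live in one allocated block; free it with one call. */
  uv_freeaddrinfo(res);
}
int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_getaddrinfo_t req;
  if (uv_getaddrinfo(loop, &req, on_resolved, "localhost", "80", NULL) != 0) {
    fprintf(stderr, "uv_getaddrinfo failed\n");
    return 1;
  }
  return uv_run(loop, UV_RUN_DEFAULT);
}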

View File

@ -1,164 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_HANDLE_INL_H_
#define UV_WIN_HANDLE_INL_H_
#include <assert.h>
#include "uv.h"
#include "internal.h"
#define DECREASE_ACTIVE_COUNT(loop, handle) \
do { \
if (--(handle)->activecnt == 0 && \
!((handle)->flags & UV__HANDLE_CLOSING)) { \
uv__handle_stop((handle)); \
} \
assert((handle)->activecnt >= 0); \
} while (0)
#define INCREASE_ACTIVE_COUNT(loop, handle) \
do { \
if ((handle)->activecnt++ == 0) { \
uv__handle_start((handle)); \
} \
assert((handle)->activecnt > 0); \
} while (0)
#define DECREASE_PENDING_REQ_COUNT(handle) \
do { \
assert(handle->reqs_pending > 0); \
handle->reqs_pending--; \
\
if (handle->flags & UV__HANDLE_CLOSING && \
handle->reqs_pending == 0) { \
uv_want_endgame(loop, (uv_handle_t*)handle); \
} \
} while (0)
#define uv__handle_closing(handle) \
do { \
assert(!((handle)->flags & UV__HANDLE_CLOSING)); \
\
if (!(((handle)->flags & UV__HANDLE_ACTIVE) && \
((handle)->flags & UV__HANDLE_REF))) \
uv__active_handle_add((uv_handle_t*) (handle)); \
\
(handle)->flags |= UV__HANDLE_CLOSING; \
(handle)->flags &= ~UV__HANDLE_ACTIVE; \
} while (0)
#define uv__handle_close(handle) \
do { \
ngx_queue_remove(&(handle)->handle_queue); \
uv__active_handle_rm((uv_handle_t*) (handle)); \
\
(handle)->flags |= UV_HANDLE_CLOSED; \
\
if ((handle)->close_cb) \
(handle)->close_cb((uv_handle_t*) (handle)); \
} while (0)
INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
handle->flags |= UV_HANDLE_ENDGAME_QUEUED;
handle->endgame_next = loop->endgame_handles;
loop->endgame_handles = handle;
}
}
INLINE static void uv_process_endgames(uv_loop_t* loop) {
uv_handle_t* handle;
while (loop->endgame_handles) {
handle = loop->endgame_handles;
loop->endgame_handles = handle->endgame_next;
handle->flags &= ~UV_HANDLE_ENDGAME_QUEUED;
switch (handle->type) {
case UV_TCP:
uv_tcp_endgame(loop, (uv_tcp_t*) handle);
break;
case UV_NAMED_PIPE:
uv_pipe_endgame(loop, (uv_pipe_t*) handle);
break;
case UV_TTY:
uv_tty_endgame(loop, (uv_tty_t*) handle);
break;
case UV_UDP:
uv_udp_endgame(loop, (uv_udp_t*) handle);
break;
case UV_POLL:
uv_poll_endgame(loop, (uv_poll_t*) handle);
break;
case UV_TIMER:
uv_timer_endgame(loop, (uv_timer_t*) handle);
break;
case UV_PREPARE:
case UV_CHECK:
case UV_IDLE:
uv_loop_watcher_endgame(loop, handle);
break;
case UV_ASYNC:
uv_async_endgame(loop, (uv_async_t*) handle);
break;
case UV_SIGNAL:
uv_signal_endgame(loop, (uv_signal_t*) handle);
break;
case UV_PROCESS:
uv_process_endgame(loop, (uv_process_t*) handle);
break;
case UV_FS_EVENT:
uv_fs_event_endgame(loop, (uv_fs_event_t*) handle);
break;
case UV_FS_POLL:
uv__fs_poll_endgame(loop, (uv_fs_poll_t*) handle);
break;
default:
assert(0);
break;
}
}
}
#endif /* UV_WIN_HANDLE_INL_H_ */

View File

@ -1,153 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <io.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
uv_handle_type uv_guess_handle(uv_file file) {
HANDLE handle;
DWORD mode;
if (file < 0) {
return UV_UNKNOWN_HANDLE;
}
handle = (HANDLE) _get_osfhandle(file);
switch (GetFileType(handle)) {
case FILE_TYPE_CHAR:
if (GetConsoleMode(handle, &mode)) {
return UV_TTY;
} else {
return UV_FILE;
}
case FILE_TYPE_PIPE:
return UV_NAMED_PIPE;
case FILE_TYPE_DISK:
return UV_FILE;
default:
return UV_UNKNOWN_HANDLE;
}
}
int uv_is_active(const uv_handle_t* handle) {
return (handle->flags & UV__HANDLE_ACTIVE) &&
!(handle->flags & UV__HANDLE_CLOSING);
}
void uv_close(uv_handle_t* handle, uv_close_cb cb) {
uv_loop_t* loop = handle->loop;
if (handle->flags & UV__HANDLE_CLOSING) {
assert(0);
return;
}
handle->close_cb = cb;
/* Handle-specific close actions */
switch (handle->type) {
case UV_TCP:
uv_tcp_close(loop, (uv_tcp_t*)handle);
return;
case UV_NAMED_PIPE:
uv_pipe_close(loop, (uv_pipe_t*) handle);
return;
case UV_TTY:
uv_tty_close((uv_tty_t*) handle);
return;
case UV_UDP:
uv_udp_close(loop, (uv_udp_t*) handle);
return;
case UV_POLL:
uv_poll_close(loop, (uv_poll_t*) handle);
return;
case UV_TIMER:
uv_timer_stop((uv_timer_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
return;
case UV_PREPARE:
uv_prepare_stop((uv_prepare_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
return;
case UV_CHECK:
uv_check_stop((uv_check_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
return;
case UV_IDLE:
uv_idle_stop((uv_idle_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
return;
case UV_ASYNC:
uv_async_close(loop, (uv_async_t*) handle);
return;
case UV_SIGNAL:
uv_signal_close(loop, (uv_signal_t*) handle);
return;
case UV_PROCESS:
uv_process_close(loop, (uv_process_t*) handle);
return;
case UV_FS_EVENT:
uv_fs_event_close(loop, (uv_fs_event_t*) handle);
return;
case UV_FS_POLL:
uv__fs_poll_close((uv_fs_poll_t*) handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
return;
default:
/* Not supported */
abort();
}
}
int uv_is_closing(const uv_handle_t* handle) {
return handle->flags & (UV__HANDLE_CLOSING | UV_HANDLE_CLOSED);
}
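uv_guess_handle above is what lets callers tell whether a file descriptor refers to a console, a pipe, or a plain file. A trivial sketch probing stdin; the result depends on how the process was started:
#include <stdio.h>
#include "uv.h"
int main(void) {
  /* fd 0 is stdin: UV_TTY when attached to a console, UV_NAMED_PIPE when
   * piped, UV_FILE when redirected from a file. */
  uv_handle_type type = uv_guess_handle(0);
  printf("stdin handle type: %d\n", (int) type);
  return 0;
}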

View File

@ -1,346 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_INTERNAL_H_
#define UV_WIN_INTERNAL_H_
#include "uv.h"
#include "../uv-common.h"
#include "tree.h"
#include "winapi.h"
#include "winsock.h"
/*
* Handles
* (also see handle-inl.h)
*/
/* Used by all handles. */
#define UV_HANDLE_CLOSED 0x00000002
#define UV_HANDLE_ENDGAME_QUEUED 0x00000004
#define UV_HANDLE_ACTIVE 0x00000010
/* uv-common.h: #define UV__HANDLE_CLOSING 0x00000001 */
/* uv-common.h: #define UV__HANDLE_ACTIVE 0x00000040 */
/* uv-common.h: #define UV__HANDLE_REF 0x00000020 */
/* uv-common.h: #define UV_HANDLE_INTERNAL 0x00000080 */
/* Used by streams and UDP handles. */
#define UV_HANDLE_READING 0x00000100
#define UV_HANDLE_BOUND 0x00000200
#define UV_HANDLE_BIND_ERROR 0x00000400
#define UV_HANDLE_LISTENING 0x00000800
#define UV_HANDLE_CONNECTION 0x00001000
#define UV_HANDLE_CONNECTED 0x00002000
#define UV_HANDLE_READABLE 0x00008000
#define UV_HANDLE_WRITABLE 0x00010000
#define UV_HANDLE_READ_PENDING 0x00020000
#define UV_HANDLE_SYNC_BYPASS_IOCP 0x00040000
#define UV_HANDLE_ZERO_READ 0x00080000
#define UV_HANDLE_EMULATE_IOCP 0x00100000
/* Only used by uv_tcp_t handles. */
#define UV_HANDLE_IPV6 0x01000000
#define UV_HANDLE_TCP_NODELAY 0x02000000
#define UV_HANDLE_TCP_KEEPALIVE 0x04000000
#define UV_HANDLE_TCP_SINGLE_ACCEPT 0x08000000
#define UV_HANDLE_TCP_ACCEPT_STATE_CHANGING 0x10000000
#define UV_HANDLE_TCP_SOCKET_CLOSED 0x20000000
#define UV_HANDLE_SHARED_TCP_SOCKET 0x40000000
/* Only used by uv_pipe_t handles. */
#define UV_HANDLE_NON_OVERLAPPED_PIPE 0x01000000
#define UV_HANDLE_PIPESERVER 0x02000000
/* Only used by uv_tty_t handles. */
#define UV_HANDLE_TTY_READABLE 0x01000000
#define UV_HANDLE_TTY_RAW 0x02000000
#define UV_HANDLE_TTY_SAVED_POSITION 0x04000000
#define UV_HANDLE_TTY_SAVED_ATTRIBUTES 0x08000000
/* Only used by uv_poll_t handles. */
#define UV_HANDLE_POLL_SLOW 0x02000000
/*
* Requests: see req-inl.h
*/
/*
* Streams: see stream-inl.h
*/
/*
* TCP
*/
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
uv_buf_t bufs[], int bufcnt, uv_write_cb cb);
void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_write_t* req);
void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req);
void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_connect_t* req);
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info,
int tcp_connection);
int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
LPWSAPROTOCOL_INFOW protocol_info);
/*
* UDP
*/
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req);
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle);
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
* Pipes
*/
uv_err_t uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
char* name, size_t nameSize);
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_pipe_read2_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
uv_read2_cb read_cb);
int uv_pipe_write(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
uv_buf_t bufs[], int bufcnt, uv_write_cb cb);
int uv_pipe_write2(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
uv_buf_t bufs[], int bufcnt, uv_stream_t* send_handle, uv_write_cb cb);
void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* req);
void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_write_t* req);
void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* raw_req);
void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_connect_t* req);
void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req);
void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
/*
* TTY
*/
void uv_console_init();
int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_tty_read_stop(uv_tty_t* handle);
int uv_tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
uv_buf_t bufs[], int bufcnt, uv_write_cb cb);
void uv_tty_close(uv_tty_t* handle);
void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req);
void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req);
/* TODO: remove me */
void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req);
/* TODO: remove me */
void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req);
void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
/*
* Poll watchers
*/
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
uv_req_t* req);
void uv_poll_close(uv_loop_t* loop, uv_poll_t* handle);
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
/*
* Timers
*/
void uv_timer_endgame(uv_loop_t* loop, uv_timer_t* handle);
DWORD uv_get_poll_timeout(uv_loop_t* loop);
void uv_process_timers(uv_loop_t* loop);
/*
* Loop watchers
*/
void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv_prepare_invoke(uv_loop_t* loop);
void uv_check_invoke(uv_loop_t* loop);
void uv_idle_invoke(uv_loop_t* loop);
void uv__once_init();
/*
* Async watcher
*/
void uv_async_close(uv_loop_t* loop, uv_async_t* handle);
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle);
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req);
/*
* Signal watcher
*/
void uv_signals_init();
int uv__signal_dispatch(int signum);
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle);
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req);
/*
* Spawn
*/
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
void uv_process_close(uv_loop_t* loop, uv_process_t* handle);
void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle);
/*
* Getaddrinfo
*/
void uv_process_getaddrinfo_req(uv_loop_t* loop, uv_getaddrinfo_t* req);
/*
* FS
*/
void uv_fs_init();
void uv_process_fs_req(uv_loop_t* loop, uv_fs_t* req);
/*
* Threadpool
*/
void uv_process_work_req(uv_loop_t* loop, uv_work_t* req);
/*
* FS Event
*/
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle);
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
/*
* Stat poller.
*/
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
/*
* Utilities.
*/
void uv__util_init();
int uv_parent_pid();
void uv_fatal_error(const int errorno, const char* syscall);
uv_err_code uv_translate_sys_error(int sys_errno);
/*
* Process stdio handles.
*/
uv_err_t uv__stdio_create(uv_loop_t* loop, uv_process_options_t* options,
BYTE** buffer_ptr);
void uv__stdio_destroy(BYTE* buffer);
void uv__stdio_noinherit(BYTE* buffer);
int uv__stdio_verify(BYTE* buffer, WORD size);
WORD uv__stdio_size(BYTE* buffer);
HANDLE uv__stdio_handle(BYTE* buffer, int fd);
/*
* Winapi and ntapi utility functions
*/
void uv_winapi_init();
/*
* Winsock utility functions
*/
void uv_winsock_init();
int uv_ntstatus_to_winsock_error(NTSTATUS status);
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info,
OVERLAPPED* overlapped);
/* Whether there are any non-IFS LSPs stacked on TCP */
extern int uv_tcp_non_ifs_lsp_ipv4;
extern int uv_tcp_non_ifs_lsp_ipv6;
/* IP address used to bind to any port at any interface */
extern struct sockaddr_in uv_addr_ip4_any_;
extern struct sockaddr_in6 uv_addr_ip6_any_;
#endif /* UV_WIN_INTERNAL_H_ */

View File

@ -1,124 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
if (handle->flags & UV__HANDLE_CLOSING) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
uv__handle_close(handle);
}
}
#define UV_LOOP_WATCHER_DEFINE(name, NAME) \
int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \
uv__handle_init(loop, (uv_handle_t*) handle, UV_##NAME); \
\
return 0; \
} \
\
\
int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
uv_loop_t* loop = handle->loop; \
uv_##name##_t* old_head; \
\
assert(handle->type == UV_##NAME); \
\
if (handle->flags & UV_HANDLE_ACTIVE) \
return 0; \
\
if (cb == NULL) \
return uv__set_artificial_error(handle->loop, UV_EINVAL); \
\
old_head = loop->name##_handles; \
\
handle->name##_next = old_head; \
handle->name##_prev = NULL; \
\
if (old_head) { \
old_head->name##_prev = handle; \
} \
\
loop->name##_handles = handle; \
\
handle->name##_cb = cb; \
handle->flags |= UV_HANDLE_ACTIVE; \
uv__handle_start(handle); \
\
return 0; \
} \
\
\
int uv_##name##_stop(uv_##name##_t* handle) { \
uv_loop_t* loop = handle->loop; \
\
assert(handle->type == UV_##NAME); \
\
if (!(handle->flags & UV_HANDLE_ACTIVE)) \
return 0; \
\
/* Update loop head if needed */ \
if (loop->name##_handles == handle) { \
loop->name##_handles = handle->name##_next; \
} \
\
/* Update the iterator-next pointer if needed */ \
if (loop->next_##name##_handle == handle) { \
loop->next_##name##_handle = handle->name##_next; \
} \
\
if (handle->name##_prev) { \
handle->name##_prev->name##_next = handle->name##_next; \
} \
if (handle->name##_next) { \
handle->name##_next->name##_prev = handle->name##_prev; \
} \
\
handle->flags &= ~UV_HANDLE_ACTIVE; \
uv__handle_stop(handle); \
\
return 0; \
} \
\
\
void uv_##name##_invoke(uv_loop_t* loop) { \
uv_##name##_t* handle; \
\
(loop)->next_##name##_handle = (loop)->name##_handles; \
\
while ((loop)->next_##name##_handle != NULL) { \
handle = (loop)->next_##name##_handle; \
(loop)->next_##name##_handle = handle->name##_next; \
\
handle->name##_cb(handle, 0); \
} \
}
UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
UV_LOOP_WATCHER_DEFINE(check, CHECK)
UV_LOOP_WATCHER_DEFINE(idle, IDLE)
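The macro expands into the uv_prepare_*, uv_check_* and uv_idle_* families with identical linked-list bookkeeping. A minimal sketch of the idle variant, with the (handle, status) callback signature as invoked by uv_idle_invoke above (hypothetical example):
#include <stdio.h>
#include "uv.h"
static int ticks = 0;
static void on_idle(uv_idle_t* handle, int status) {
  (void) status;
  if (++ticks == 5)
    uv_idle_stop(handle);  /* unlinks the handle; the loop can then exit */
}
int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_idle_t idler;
  uv_idle_init(loop, &idler);
  uv_idle_start(&idler, on_idle);
  uv_run(loop, UV_RUN_DEFAULT);
  printf("idle callback ran %d times\n", ticks);
  return 0;
}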

File diff suppressed because it is too large.

View File

@ -1,607 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <io.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
{0xe70f1aa0, 0xab8b, 0x11cf,
{0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
{0xf9eab0c0, 0x26d4, 0x11d0,
{0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
{0x9fc48064, 0x7298, 0x43e4,
{0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
};
typedef struct uv_single_fd_set_s {
unsigned int fd_count;
SOCKET fd_array[1];
} uv_single_fd_set_t;
static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;
static void uv__init_overlapped_dummy(void) {
HANDLE event;
event = CreateEvent(NULL, TRUE, TRUE, NULL);
if (event == NULL)
uv_fatal_error(GetLastError(), "CreateEvent");
memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
}
static OVERLAPPED* uv__get_overlapped_dummy() {
uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
return &overlapped_dummy_;
}
static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
uv_req_t* req;
AFD_POLL_INFO* afd_poll_info;
DWORD result;
/* Find a yet unsubmitted req to submit. */
if (handle->submitted_events_1 == 0) {
req = &handle->poll_req_1;
afd_poll_info = &handle->afd_poll_info_1;
handle->submitted_events_1 = handle->events;
handle->mask_events_1 = 0;
handle->mask_events_2 = handle->events;
} else if (handle->submitted_events_2 == 0) {
req = &handle->poll_req_2;
afd_poll_info = &handle->afd_poll_info_2;
handle->submitted_events_2 = handle->events;
handle->mask_events_1 = handle->events;
handle->mask_events_2 = 0;
} else {
assert(0);
}
/* Setting Exclusive to TRUE makes the other poll request return if there */
/* is any. */
afd_poll_info->Exclusive = TRUE;
afd_poll_info->NumberOfHandles = 1;
afd_poll_info->Timeout.QuadPart = INT64_MAX;
afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
afd_poll_info->Handles[0].Status = 0;
afd_poll_info->Handles[0].Events = 0;
if (handle->events & UV_READABLE) {
afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
}
if (handle->events & UV_WRITABLE) {
afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
}
memset(&req->overlapped, 0, sizeof req->overlapped);
result = uv_msafd_poll((SOCKET) handle->peer_socket,
afd_poll_info,
&req->overlapped);
if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
/* Queue this req, reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
}
}
static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
AFD_POLL_INFO afd_poll_info;
DWORD result;
afd_poll_info.Exclusive = TRUE;
afd_poll_info.NumberOfHandles = 1;
afd_poll_info.Timeout.QuadPart = INT64_MAX;
afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
afd_poll_info.Handles[0].Status = 0;
afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
result = uv_msafd_poll(handle->socket,
&afd_poll_info,
uv__get_overlapped_dummy());
if (result == SOCKET_ERROR) {
DWORD error = WSAGetLastError();
if (error != WSA_IO_PENDING) {
uv__set_sys_error(loop, WSAGetLastError());
return -1;
}
}
return 0;
}
static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
uv_req_t* req) {
unsigned char mask_events;
AFD_POLL_INFO* afd_poll_info;
if (req == &handle->poll_req_1) {
afd_poll_info = &handle->afd_poll_info_1;
handle->submitted_events_1 = 0;
mask_events = handle->mask_events_1;
} else if (req == &handle->poll_req_2) {
afd_poll_info = &handle->afd_poll_info_2;
handle->submitted_events_2 = 0;
mask_events = handle->mask_events_2;
} else {
assert(0);
}
/* Report an error unless the select was just interrupted. */
if (!REQ_SUCCESS(req)) {
DWORD error = GET_REQ_SOCK_ERROR(req);
if (error != WSAEINTR && handle->events != 0) {
handle->events = 0; /* Stop the watcher */
uv__set_sys_error(loop, error);
handle->poll_cb(handle, -1, 0);
}
} else if (afd_poll_info->NumberOfHandles >= 1) {
unsigned char events = 0;
if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
events |= UV_READABLE;
}
if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
AFD_POLL_CONNECT_FAIL)) != 0) {
events |= UV_WRITABLE;
}
events &= handle->events & ~mask_events;
if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
/* Stop polling. */
handle->events = 0;
uv__handle_stop(handle);
}
if (events != 0) {
handle->poll_cb(handle, 0, events);
}
}
if ((handle->events & ~(handle->submitted_events_1 |
handle->submitted_events_2)) != 0) {
uv__fast_poll_submit_poll_req(loop, handle);
} else if ((handle->flags & UV__HANDLE_CLOSING) &&
handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
}
static int uv__fast_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
assert(handle->type == UV_POLL);
assert(!(handle->flags & UV__HANDLE_CLOSING));
assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
handle->events = events;
if (handle->events != 0) {
uv__handle_start(handle);
} else {
uv__handle_stop(handle);
}
if ((handle->events & ~(handle->submitted_events_1 |
handle->submitted_events_2)) != 0) {
uv__fast_poll_submit_poll_req(handle->loop, handle);
}
return 0;
}
static void uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
handle->events = 0;
uv__handle_closing(handle);
if (handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
} else {
/* Cancel outstanding poll requests by executing another, unique poll */
/* request that forces the outstanding ones to return. */
uv__fast_poll_cancel_poll_req(loop, handle);
}
}
static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
WSAPROTOCOL_INFOW* protocol_info) {
SOCKET sock = 0;
sock = WSASocketW(protocol_info->iAddressFamily,
protocol_info->iSocketType,
protocol_info->iProtocol,
protocol_info,
0,
WSA_FLAG_OVERLAPPED);
if (sock == INVALID_SOCKET) {
return INVALID_SOCKET;
}
if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
goto error;
};
if (CreateIoCompletionPort((HANDLE) sock,
iocp,
(ULONG_PTR) sock,
0) == NULL) {
goto error;
}
return sock;
error:
closesocket(sock);
return INVALID_SOCKET;
}
static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
WSAPROTOCOL_INFOW* protocol_info) {
int index, i;
SOCKET peer_socket;
index = -1;
for (i = 0; i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
if (memcmp((void*) &protocol_info->ProviderId,
(void*) &uv_msafd_provider_ids[i],
sizeof protocol_info->ProviderId) == 0) {
index = i;
}
}
/* Check if the protocol uses an msafd socket. */
if (index < 0) {
return INVALID_SOCKET;
}
  /* If we haven't tried to create a peer socket yet, try to make one. Don't */
/* try again if the peer socket creation failed earlier for the same */
/* protocol. */
peer_socket = loop->poll_peer_sockets[index];
if (peer_socket == 0) {
peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
loop->poll_peer_sockets[index] = peer_socket;
}
return peer_socket;
}
static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
uv_req_t* req = (uv_req_t*) arg;
uv_poll_t* handle = (uv_poll_t*) req->data;
unsigned char reported_events;
int r;
uv_single_fd_set_t rfds, wfds, efds;
struct timeval timeout;
assert(handle->type == UV_POLL);
assert(req->type == UV_POLL_REQ);
if (handle->events & UV_READABLE) {
rfds.fd_count = 1;
rfds.fd_array[0] = handle->socket;
} else {
rfds.fd_count = 0;
}
if (handle->events & UV_WRITABLE) {
wfds.fd_count = 1;
wfds.fd_array[0] = handle->socket;
efds.fd_count = 1;
efds.fd_array[0] = handle->socket;
} else {
wfds.fd_count = 0;
efds.fd_count = 0;
}
/* Make the select() time out after 3 minutes. If select() hangs because */
/* the user closed the socket, we will at least not hang indefinitely. */
timeout.tv_sec = 3 * 60;
timeout.tv_usec = 0;
r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
if (r == SOCKET_ERROR) {
/* Queue this req, reporting an error. */
SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
POST_COMPLETION_FOR_REQ(handle->loop, req);
return 0;
}
reported_events = 0;
if (r > 0) {
if (rfds.fd_count > 0) {
assert(rfds.fd_count == 1);
assert(rfds.fd_array[0] == handle->socket);
reported_events |= UV_READABLE;
}
if (wfds.fd_count > 0) {
assert(wfds.fd_count == 1);
assert(wfds.fd_array[0] == handle->socket);
reported_events |= UV_WRITABLE;
} else if (efds.fd_count > 0) {
assert(efds.fd_count == 1);
assert(efds.fd_array[0] == handle->socket);
reported_events |= UV_WRITABLE;
}
}
SET_REQ_SUCCESS(req);
req->overlapped.InternalHigh = (DWORD) reported_events;
POST_COMPLETION_FOR_REQ(handle->loop, req);
return 0;
}
static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
uv_req_t* req;
/* Find a yet unsubmitted req to submit. */
if (handle->submitted_events_1 == 0) {
req = &handle->poll_req_1;
handle->submitted_events_1 = handle->events;
handle->mask_events_1 = 0;
handle->mask_events_2 = handle->events;
} else if (handle->submitted_events_2 == 0) {
req = &handle->poll_req_2;
handle->submitted_events_2 = handle->events;
handle->mask_events_1 = handle->events;
handle->mask_events_2 = 0;
} else {
assert(0);
}
if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
(void*) req,
WT_EXECUTELONGFUNCTION)) {
/* Make this req pending, reporting an error. */
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, req);
}
}
static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
uv_req_t* req) {
unsigned char mask_events;
if (req == &handle->poll_req_1) {
handle->submitted_events_1 = 0;
mask_events = handle->mask_events_1;
} else if (req == &handle->poll_req_2) {
handle->submitted_events_2 = 0;
mask_events = handle->mask_events_2;
} else {
assert(0);
}
if (!REQ_SUCCESS(req)) {
/* Error. */
if (handle->events != 0) {
handle->events = 0; /* Stop the watcher */
uv__set_sys_error(loop, GET_REQ_ERROR(req));
handle->poll_cb(handle, -1, 0);
}
} else {
/* Got some events. */
int events = req->overlapped.InternalHigh & handle->events & ~mask_events;
if (events != 0) {
handle->poll_cb(handle, 0, events);
}
}
if ((handle->events & ~(handle->submitted_events_1 |
handle->submitted_events_2)) != 0) {
uv__slow_poll_submit_poll_req(loop, handle);
} else if ((handle->flags & UV__HANDLE_CLOSING) &&
handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
}
static int uv__slow_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
assert(handle->type == UV_POLL);
assert(!(handle->flags & UV__HANDLE_CLOSING));
assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
handle->events = events;
if (handle->events != 0) {
uv__handle_start(handle);
} else {
uv__handle_stop(handle);
}
if ((handle->events &
~(handle->submitted_events_1 | handle->submitted_events_2)) != 0) {
uv__slow_poll_submit_poll_req(handle->loop, handle);
}
return 0;
}
static void uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
handle->events = 0;
uv__handle_closing(handle);
if (handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
}
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
return uv_poll_init_socket(loop, handle, (SOCKET) _get_osfhandle(fd));
}
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
uv_os_sock_t socket) {
WSAPROTOCOL_INFOW protocol_info;
int len;
SOCKET peer_socket, base_socket;
DWORD bytes;
  /* Try to obtain a base handle for the socket. This increases the chances */
  /* that we find an AFD handle and are able to use the fast poll mechanism. */
  /* This will always fail on Windows XP/2k3, since they don't support the */
  /* SIO_BASE_HANDLE ioctl. */
#ifndef NDEBUG
base_socket = INVALID_SOCKET;
#endif
if (WSAIoctl(socket,
SIO_BASE_HANDLE,
NULL,
0,
&base_socket,
sizeof base_socket,
&bytes,
NULL,
NULL) == 0) {
assert(base_socket != 0 && base_socket != INVALID_SOCKET);
socket = base_socket;
}
uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
handle->socket = socket;
handle->events = 0;
/* Obtain protocol information about the socket. */
len = sizeof protocol_info;
if (getsockopt(socket,
SOL_SOCKET,
SO_PROTOCOL_INFOW,
(char*) &protocol_info,
&len) != 0) {
uv__set_sys_error(loop, WSAGetLastError());
return -1;
}
/* Get the peer socket that is needed to enable fast poll. If the returned */
/* value is NULL, the protocol is not implemented by MSAFD and we'll have */
/* to use slow mode. */
peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);
if (peer_socket != INVALID_SOCKET) {
/* Initialize fast poll specific fields. */
handle->peer_socket = peer_socket;
} else {
/* Initialize slow poll specific fields. */
handle->flags |= UV_HANDLE_POLL_SLOW;
}
  /* Initialize 2 poll reqs. */
handle->submitted_events_1 = 0;
uv_req_init(loop, (uv_req_t*) &(handle->poll_req_1));
handle->poll_req_1.type = UV_POLL_REQ;
handle->poll_req_1.data = handle;
handle->submitted_events_2 = 0;
uv_req_init(loop, (uv_req_t*) &(handle->poll_req_2));
handle->poll_req_2.type = UV_POLL_REQ;
handle->poll_req_2.data = handle;
return 0;
}
int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
if (uv__fast_poll_set(handle->loop, handle, events) < 0)
return -1;
} else {
if (uv__slow_poll_set(handle->loop, handle, events) < 0)
return -1;
}
handle->poll_cb = cb;
return 0;
}
int uv_poll_stop(uv_poll_t* handle) {
if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
return uv__fast_poll_set(handle->loop, handle, 0);
} else {
return uv__slow_poll_set(handle->loop, handle, 0);
}
}
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
uv__fast_poll_process_poll_req(loop, handle, req);
} else {
uv__slow_poll_process_poll_req(loop, handle, req);
}
}
void uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
uv__fast_poll_close(loop, handle);
} else {
uv__slow_poll_close(loop, handle);
}
}
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
assert(handle->flags & UV__HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
assert(handle->submitted_events_1 == 0);
assert(handle->submitted_events_2 == 0);
uv__handle_close(handle);
}
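/* A minimal usage sketch, assuming the public uv_poll API of this era: the
 * fast (MSAFD) versus slow (select-based) path chosen above is transparent
 * to callers, and the callback signature matches the calls made above
 * (handle, status, events). The names below are hypothetical. */
static void example_on_poll(uv_poll_t* handle, int status, int events) {
  if (status < 0)
    return;  /* the loop's last error would describe the failure */
  if (events & UV_READABLE) {
    /* The socket is readable; a recv() would not block right now. */
  }
}

static int example_watch_socket(uv_loop_t* loop, uv_os_sock_t sock) {
  static uv_poll_t poll_handle;
  if (uv_poll_init_socket(loop, &poll_handle, sock) < 0)
    return -1;
  return uv_poll_start(&poll_handle, UV_READABLE | UV_WRITABLE, example_on_poll);
}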

View File

@ -1,510 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <io.h>
#include <stdio.h>
#include <stdlib.h>
#include "uv.h"
#include "internal.h"
/*
* The `child_stdio_buffer` buffer has the following layout:
* int number_of_fds
* unsigned char crt_flags[number_of_fds]
* HANDLE os_handle[number_of_fds]
*/
#define CHILD_STDIO_SIZE(count) \
(sizeof(int) + \
sizeof(unsigned char) * (count) + \
sizeof(uintptr_t) * (count))
#define CHILD_STDIO_COUNT(buffer) \
*((unsigned int*) (buffer))
#define CHILD_STDIO_CRT_FLAGS(buffer, fd) \
*((unsigned char*) (buffer) + sizeof(int) + fd)
#define CHILD_STDIO_HANDLE(buffer, fd) \
*((HANDLE*) ((unsigned char*) (buffer) + \
sizeof(int) + \
sizeof(unsigned char) * \
CHILD_STDIO_COUNT((buffer)) + \
sizeof(HANDLE) * (fd)))
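/* A minimal decoding sketch, assuming a buffer laid out as described above;
 * the function name is hypothetical and only shows how the accessor macros
 * index the leading count, the per-fd CRT flag bytes and the per-fd HANDLEs. */
static void example_dump_child_stdio(BYTE* buffer) {
  int i;
  int count = CHILD_STDIO_COUNT(buffer);                        /* leading int */
  for (i = 0; i < count; i++) {
    unsigned char crt_flags = CHILD_STDIO_CRT_FLAGS(buffer, i); /* per-fd byte */
    HANDLE os_handle = CHILD_STDIO_HANDLE(buffer, i);           /* per-fd HANDLE */
    (void) crt_flags;
    (void) os_handle;
  }
}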
/* CRT file descriptor mode flags */
#define FOPEN 0x01
#define FEOFLAG 0x02
#define FCRLF 0x04
#define FPIPE 0x08
#define FNOINHERIT 0x10
#define FAPPEND 0x20
#define FDEV 0x40
#define FTEXT 0x80
/*
 * Clear the HANDLE_FLAG_INHERIT flag from all HANDLEs that were inherited from
* the parent process. Don't check for errors - the stdio handles may not be
* valid, or may be closed already. There is no guarantee that this function
* does a perfect job.
*/
void uv_disable_stdio_inheritance(void) {
HANDLE handle;
STARTUPINFOW si;
/* Make the windows stdio handles non-inheritable. */
handle = GetStdHandle(STD_INPUT_HANDLE);
if (handle != NULL && handle != INVALID_HANDLE_VALUE)
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
handle = GetStdHandle(STD_OUTPUT_HANDLE);
if (handle != NULL && handle != INVALID_HANDLE_VALUE)
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
handle = GetStdHandle(STD_ERROR_HANDLE);
if (handle != NULL && handle != INVALID_HANDLE_VALUE)
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
/* Make inherited CRT FDs non-inheritable. */
GetStartupInfoW(&si);
if (uv__stdio_verify(si.lpReserved2, si.cbReserved2))
uv__stdio_noinherit(si.lpReserved2);
}
static uv_err_t uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
char pipe_name[64];
SECURITY_ATTRIBUTES sa;
DWORD server_access = 0;
DWORD client_access = 0;
HANDLE child_pipe = INVALID_HANDLE_VALUE;
uv_err_t err;
if (flags & UV_READABLE_PIPE) {
/* The server needs inbound access too, otherwise CreateNamedPipe() */
/* won't give us the FILE_READ_ATTRIBUTES permission. We need that to */
/* probe the state of the write buffer when we're trying to shutdown */
/* the pipe. */
server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND;
client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
}
if (flags & UV_WRITABLE_PIPE) {
server_access |= PIPE_ACCESS_INBOUND;
client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
}
/* Create server pipe handle. */
err = uv_stdio_pipe_server(loop,
server_pipe,
server_access,
pipe_name,
sizeof(pipe_name));
if (err.code != UV_OK)
goto error;
/* Create child pipe handle. */
sa.nLength = sizeof sa;
sa.lpSecurityDescriptor = NULL;
sa.bInheritHandle = TRUE;
child_pipe = CreateFileA(pipe_name,
client_access,
0,
&sa,
OPEN_EXISTING,
server_pipe->ipc ? FILE_FLAG_OVERLAPPED : 0,
NULL);
if (child_pipe == INVALID_HANDLE_VALUE) {
err = uv__new_sys_error(GetLastError());
goto error;
}
#ifndef NDEBUG
/* Validate that the pipe was opened in the right mode. */
{
DWORD mode;
BOOL r = GetNamedPipeHandleState(child_pipe,
&mode,
NULL,
NULL,
NULL,
NULL,
0);
assert(r == TRUE);
assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
}
#endif
/* Do a blocking ConnectNamedPipe. This should not block because we have */
/* both ends of the pipe created. */
if (!ConnectNamedPipe(server_pipe->handle, NULL)) {
if (GetLastError() != ERROR_PIPE_CONNECTED) {
err = uv__new_sys_error(GetLastError());
goto error;
}
}
/* The server end is now readable and/or writable. */
if (flags & UV_READABLE_PIPE)
server_pipe->flags |= UV_HANDLE_WRITABLE;
if (flags & UV_WRITABLE_PIPE)
server_pipe->flags |= UV_HANDLE_READABLE;
*child_pipe_ptr = child_pipe;
return uv_ok_;
error:
if (server_pipe->handle != INVALID_HANDLE_VALUE) {
uv_pipe_cleanup(loop, server_pipe);
}
if (child_pipe != INVALID_HANDLE_VALUE) {
CloseHandle(child_pipe);
}
return err;
}
static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
HANDLE current_process;
/* _get_osfhandle will sometimes return -2 in case of an error. This seems */
/* to happen when fd <= 2 and the process' corresponding stdio handle is */
  /* set to NULL. Unfortunately DuplicateHandle will happily duplicate */
/* (HANDLE) -2, so this situation goes unnoticed until someone tries to */
/* use the duplicate. Therefore we filter out known-invalid handles here. */
if (handle == INVALID_HANDLE_VALUE ||
handle == NULL ||
handle == (HANDLE) -2) {
*dup = INVALID_HANDLE_VALUE;
uv__set_artificial_error(loop, UV_EBADF);
return -1;
}
current_process = GetCurrentProcess();
if (!DuplicateHandle(current_process,
handle,
current_process,
dup,
0,
TRUE,
DUPLICATE_SAME_ACCESS)) {
*dup = INVALID_HANDLE_VALUE;
uv__set_sys_error(loop, GetLastError());
return -1;
}
return 0;
}
static int uv__duplicate_fd(uv_loop_t* loop, int fd, HANDLE* dup) {
HANDLE handle;
if (fd == -1) {
*dup = INVALID_HANDLE_VALUE;
uv__set_artificial_error(loop, UV_EBADF);
return -1;
}
handle = (HANDLE) _get_osfhandle(fd);
return uv__duplicate_handle(loop, handle, dup);
}
uv_err_t uv__create_nul_handle(HANDLE* handle_ptr,
DWORD access) {
HANDLE handle;
SECURITY_ATTRIBUTES sa;
sa.nLength = sizeof sa;
sa.lpSecurityDescriptor = NULL;
sa.bInheritHandle = TRUE;
handle = CreateFileW(L"NUL",
access,
FILE_SHARE_READ | FILE_SHARE_WRITE,
&sa,
OPEN_EXISTING,
0,
NULL);
if (handle == INVALID_HANDLE_VALUE) {
return uv__new_sys_error(GetLastError());
}
*handle_ptr = handle;
return uv_ok_;
}
uv_err_t uv__stdio_create(uv_loop_t* loop, uv_process_options_t* options,
BYTE** buffer_ptr) {
BYTE* buffer;
int count, i;
uv_err_t err;
count = options->stdio_count;
if (count < 0 || count > 255) {
/* Only support FDs 0-255 */
return uv__new_artificial_error(UV_ENOTSUP);
} else if (count < 3) {
/* There should always be at least 3 stdio handles. */
count = 3;
}
/* Allocate the child stdio buffer */
buffer = (BYTE*) malloc(CHILD_STDIO_SIZE(count));
if (buffer == NULL) {
return uv__new_artificial_error(UV_ENOMEM);
}
/* Prepopulate the buffer with INVALID_HANDLE_VALUE handles so we can */
/* clean up on failure. */
CHILD_STDIO_COUNT(buffer) = count;
for (i = 0; i < count; i++) {
CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE;
}
for (i = 0; i < count; i++) {
uv_stdio_container_t fdopt;
if (i < options->stdio_count) {
fdopt = options->stdio[i];
} else {
fdopt.flags = UV_IGNORE;
}
switch (fdopt.flags & (UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD |
UV_INHERIT_STREAM)) {
case UV_IGNORE:
      /* Starting a process with no stdin/stdout/stderr can confuse it. */
      /* So no matter what the user specified, we make sure the first */
      /* three FDs are always open in their typical modes: stdin should be */
      /* readable and stdout/err should be writable. For FDs > 2, don't */
/* do anything - all handles in the stdio buffer are initialized with */
/* INVALID_HANDLE_VALUE, which should be okay. */
if (i <= 2) {
DWORD access = (i == 0) ? FILE_GENERIC_READ :
FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES;
err = uv__create_nul_handle(&CHILD_STDIO_HANDLE(buffer, i),
access);
if (err.code != UV_OK)
goto error;
CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
}
break;
case UV_CREATE_PIPE: {
      /* Create a pair of connected pipe ends; one end is turned into */
/* an uv_pipe_t for use by the parent. The other one is given to */
/* the child. */
uv_pipe_t* parent_pipe = (uv_pipe_t*) fdopt.data.stream;
HANDLE child_pipe;
/* Create a new, connected pipe pair. stdio[i].stream should point */
/* to an uninitialized, but not connected pipe handle. */
assert(fdopt.data.stream->type == UV_NAMED_PIPE);
assert(!(fdopt.data.stream->flags & UV_HANDLE_CONNECTION));
assert(!(fdopt.data.stream->flags & UV_HANDLE_PIPESERVER));
err = uv__create_stdio_pipe_pair(loop,
parent_pipe,
&child_pipe,
fdopt.flags);
if (err.code != UV_OK)
goto error;
CHILD_STDIO_HANDLE(buffer, i) = child_pipe;
CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
break;
}
case UV_INHERIT_FD: {
/* Inherit a raw FD. */
HANDLE child_handle;
/* Make an inheritable duplicate of the handle. */
if (uv__duplicate_fd(loop, fdopt.data.fd, &child_handle) < 0) {
        /* If fdopt.data.fd is not valid and fd <= 2, then ignore the */
/* error. */
if (fdopt.data.fd <= 2 && loop->last_err.code == UV_EBADF) {
CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE;
break;
}
goto error;
}
/* Figure out what the type is. */
switch (GetFileType(child_handle)) {
case FILE_TYPE_DISK:
CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN;
break;
case FILE_TYPE_PIPE:
            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
            break;
case FILE_TYPE_CHAR:
case FILE_TYPE_REMOTE:
CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
break;
case FILE_TYPE_UNKNOWN:
if (GetLastError() != 0) {
uv__set_sys_error(loop, GetLastError());
CloseHandle(child_handle);
goto error;
}
CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
break;
default:
assert(0);
}
CHILD_STDIO_HANDLE(buffer, i) = child_handle;
break;
}
case UV_INHERIT_STREAM: {
/* Use an existing stream as the stdio handle for the child. */
HANDLE stream_handle, child_handle;
unsigned char crt_flags;
uv_stream_t* stream = fdopt.data.stream;
/* Leech the handle out of the stream. */
if (stream->type == UV_TTY) {
stream_handle = ((uv_tty_t*) stream)->handle;
crt_flags = FOPEN | FDEV;
} else if (stream->type == UV_NAMED_PIPE &&
stream->flags & UV_HANDLE_CONNECTED) {
stream_handle = ((uv_pipe_t*) stream)->handle;
crt_flags = FOPEN | FPIPE;
} else {
stream_handle = INVALID_HANDLE_VALUE;
crt_flags = 0;
}
if (stream_handle == NULL ||
stream_handle == INVALID_HANDLE_VALUE) {
/* The handle is already closed, or not yet created, or the */
/* stream type is not supported. */
uv__set_artificial_error(loop, UV_ENOTSUP);
goto error;
}
/* Make an inheritable copy of the handle. */
if (uv__duplicate_handle(loop,
stream_handle,
&child_handle) < 0) {
goto error;
}
CHILD_STDIO_HANDLE(buffer, i) = child_handle;
CHILD_STDIO_CRT_FLAGS(buffer, i) = crt_flags;
break;
}
default:
assert(0);
}
}
*buffer_ptr = buffer;
return uv_ok_;
error:
uv__stdio_destroy(buffer);
return err;
}
void uv__stdio_destroy(BYTE* buffer) {
int i, count;
count = CHILD_STDIO_COUNT(buffer);
for (i = 0; i < count; i++) {
HANDLE handle = CHILD_STDIO_HANDLE(buffer, i);
if (handle != INVALID_HANDLE_VALUE) {
CloseHandle(handle);
}
}
free(buffer);
}
void uv__stdio_noinherit(BYTE* buffer) {
int i, count;
count = CHILD_STDIO_COUNT(buffer);
for (i = 0; i < count; i++) {
HANDLE handle = CHILD_STDIO_HANDLE(buffer, i);
if (handle != INVALID_HANDLE_VALUE) {
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
}
}
}
int uv__stdio_verify(BYTE* buffer, WORD size) {
unsigned int count;
/* Check the buffer pointer. */
if (buffer == NULL)
return 0;
/* Verify that the buffer is at least big enough to hold the count. */
if (size < CHILD_STDIO_SIZE(0))
return 0;
/* Verify if the count is within range. */
count = CHILD_STDIO_COUNT(buffer);
if (count > 256)
return 0;
/* Verify that the buffer size is big enough to hold info for N FDs. */
if (size < CHILD_STDIO_SIZE(count))
return 0;
return 1;
}
WORD uv__stdio_size(BYTE* buffer) {
return (WORD) CHILD_STDIO_SIZE(CHILD_STDIO_COUNT((buffer)));
}
HANDLE uv__stdio_handle(BYTE* buffer, int fd) {
return CHILD_STDIO_HANDLE(buffer, fd);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,224 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_REQ_INL_H_
#define UV_WIN_REQ_INL_H_
#include <assert.h>
#include "uv.h"
#include "internal.h"
#define SET_REQ_STATUS(req, status) \
(req)->overlapped.Internal = (ULONG_PTR) (status)
#define SET_REQ_ERROR(req, error) \
SET_REQ_STATUS((req), NTSTATUS_FROM_WIN32((error)))
#define SET_REQ_SUCCESS(req) \
SET_REQ_STATUS((req), STATUS_SUCCESS)
#define GET_REQ_STATUS(req) \
((NTSTATUS) (req)->overlapped.Internal)
#define REQ_SUCCESS(req) \
(NT_SUCCESS(GET_REQ_STATUS((req))))
#define GET_REQ_ERROR(req) \
(pRtlNtStatusToDosError(GET_REQ_STATUS((req))))
#define GET_REQ_SOCK_ERROR(req) \
(uv_ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
#define REGISTER_HANDLE_REQ(loop, handle, req) \
do { \
INCREASE_ACTIVE_COUNT((loop), (handle)); \
uv__req_register((loop), (req)); \
} while (0)
#define UNREGISTER_HANDLE_REQ(loop, handle, req) \
do { \
DECREASE_ACTIVE_COUNT((loop), (handle)); \
uv__req_unregister((loop), (req)); \
} while (0)
#define UV_SUCCEEDED_WITHOUT_IOCP(result) \
((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP))
#define UV_SUCCEEDED_WITH_IOCP(result) \
((result) || (GetLastError() == ERROR_IO_PENDING))
#define POST_COMPLETION_FOR_REQ(loop, req) \
if (!PostQueuedCompletionStatus((loop)->iocp, \
0, \
0, \
&((req)->overlapped))) { \
uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); \
}
INLINE static void uv_req_init(uv_loop_t* loop, uv_req_t* req) {
req->type = UV_UNKNOWN_REQ;
SET_REQ_SUCCESS(req);
}
INLINE static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
return CONTAINING_RECORD(overlapped, uv_req_t, overlapped);
}
INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
req->next_req = NULL;
if (loop->pending_reqs_tail) {
req->next_req = loop->pending_reqs_tail->next_req;
loop->pending_reqs_tail->next_req = req;
loop->pending_reqs_tail = req;
} else {
req->next_req = req;
loop->pending_reqs_tail = req;
}
}
#define DELEGATE_STREAM_REQ(loop, req, method, handle_at) \
do { \
switch (((uv_handle_t*) (req)->handle_at)->type) { \
case UV_TCP: \
uv_process_tcp_##method##_req(loop, \
(uv_tcp_t*) ((req)->handle_at), \
req); \
break; \
\
case UV_NAMED_PIPE: \
uv_process_pipe_##method##_req(loop, \
(uv_pipe_t*) ((req)->handle_at), \
req); \
break; \
\
case UV_TTY: \
uv_process_tty_##method##_req(loop, \
(uv_tty_t*) ((req)->handle_at), \
req); \
break; \
\
default: \
assert(0); \
} \
} while (0)
INLINE static void uv_process_reqs(uv_loop_t* loop) {
uv_req_t* req;
uv_req_t* first;
uv_req_t* next;
if (loop->pending_reqs_tail == NULL) {
return;
}
first = loop->pending_reqs_tail->next_req;
next = first;
loop->pending_reqs_tail = NULL;
while (next != NULL) {
req = next;
next = req->next_req != first ? req->next_req : NULL;
switch (req->type) {
case UV_READ:
DELEGATE_STREAM_REQ(loop, req, read, data);
break;
case UV_WRITE:
DELEGATE_STREAM_REQ(loop, (uv_write_t*) req, write, handle);
break;
case UV_ACCEPT:
DELEGATE_STREAM_REQ(loop, req, accept, data);
break;
case UV_CONNECT:
DELEGATE_STREAM_REQ(loop, (uv_connect_t*) req, connect, handle);
break;
case UV_SHUTDOWN:
/* Tcp shutdown requests don't come here. */
assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE);
uv_process_pipe_shutdown_req(
loop,
(uv_pipe_t*) ((uv_shutdown_t*) req)->handle,
(uv_shutdown_t*) req);
break;
case UV_UDP_RECV:
uv_process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
break;
case UV_UDP_SEND:
uv_process_udp_send_req(loop,
((uv_udp_send_t*) req)->handle,
(uv_udp_send_t*) req);
break;
case UV_WAKEUP:
uv_process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
break;
case UV_SIGNAL_REQ:
uv_process_signal_req(loop, (uv_signal_t*) req->data, req);
break;
case UV_POLL_REQ:
uv_process_poll_req(loop, (uv_poll_t*) req->data, req);
break;
case UV_GETADDRINFO:
uv_process_getaddrinfo_req(loop, (uv_getaddrinfo_t*) req);
break;
case UV_PROCESS_EXIT:
uv_process_proc_exit(loop, (uv_process_t*) req->data);
break;
case UV_FS:
uv_process_fs_req(loop, (uv_fs_t*) req);
break;
case UV_WORK:
uv_process_work_req(loop, (uv_work_t*) req);
break;
case UV_FS_EVENT_REQ:
uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
break;
default:
assert(0);
}
}
}
#endif /* UV_WIN_REQ_INL_H_ */

View File

@ -1,25 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"

View File

@ -1,354 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <signal.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
RB_HEAD(uv_signal_tree_s, uv_signal_s);
static struct uv_signal_tree_s uv__signal_tree = RB_INITIALIZER(uv__signal_tree);
static ssize_t volatile uv__signal_control_handler_refs = 0;
static CRITICAL_SECTION uv__signal_lock;
void uv_signals_init() {
InitializeCriticalSection(&uv__signal_lock);
}
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
  /* Compare signums first so all watchers with the same signum end up */
/* adjacent. */
if (w1->signum < w2->signum) return -1;
if (w1->signum > w2->signum) return 1;
/* Sort by loop pointer, so we can easily look up the first item after */
/* { .signum = x, .loop = NULL } */
if ((uintptr_t) w1->loop < (uintptr_t) w2->loop) return -1;
if ((uintptr_t) w1->loop > (uintptr_t) w2->loop) return 1;
if ((uintptr_t) w1 < (uintptr_t) w2) return -1;
if ((uintptr_t) w1 > (uintptr_t) w2) return 1;
return 0;
}
RB_GENERATE_STATIC(uv_signal_tree_s, uv_signal_s, tree_entry, uv__signal_compare);
/*
* Dispatches signal {signum} to all active uv_signal_t watchers in all loops.
* Returns 1 if the signal was dispatched to any watcher, or 0 if there were
* no active signal watchers observing this signal.
*/
int uv__signal_dispatch(int signum) {
uv_signal_t lookup;
uv_signal_t* handle;
int dispatched = 0;
EnterCriticalSection(&uv__signal_lock);
lookup.signum = signum;
lookup.loop = NULL;
for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup);
handle != NULL && handle->signum == signum;
handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) {
unsigned long previous = InterlockedExchange(&handle->pending_signum, signum);
if (!previous) {
POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req);
}
dispatched = 1;
}
LeaveCriticalSection(&uv__signal_lock);
return dispatched;
}
static BOOL WINAPI uv__signal_control_handler(DWORD type) {
switch (type) {
case CTRL_C_EVENT:
return uv__signal_dispatch(SIGINT);
case CTRL_BREAK_EVENT:
return uv__signal_dispatch(SIGBREAK);
case CTRL_CLOSE_EVENT:
if (uv__signal_dispatch(SIGHUP)) {
      /* Windows terminates the process shortly after this control handler */
      /* returns, so block here to give the main loop a few seconds to pick */
      /* up the signal and react before that happens. */
Sleep(INFINITE);
return TRUE;
}
return FALSE;
case CTRL_LOGOFF_EVENT:
case CTRL_SHUTDOWN_EVENT:
/* These signals are only sent to services. Services have their own */
/* notification mechanism, so there's no point in handling these. */
default:
/* We don't handle these. */
return FALSE;
}
}
static uv_err_t uv__signal_register_control_handler() {
/* When this function is called, the uv__signal_lock must be held. */
/* If the console control handler has already been hooked, just add a */
/* reference. */
if (uv__signal_control_handler_refs > 0)
return uv_ok_;
if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE))
return uv__new_sys_error(GetLastError());
uv__signal_control_handler_refs++;
return uv_ok_;
}
static void uv__signal_unregister_control_handler() {
/* When this function is called, the uv__signal_lock must be held. */
BOOL r;
/* Don't unregister if the number of console control handlers exceeds one. */
/* Just remove a reference in that case. */
if (uv__signal_control_handler_refs > 1) {
uv__signal_control_handler_refs--;
return;
}
assert(uv__signal_control_handler_refs == 1);
r = SetConsoleCtrlHandler(uv__signal_control_handler, FALSE);
/* This should never fail; if it does it is probably a bug in libuv. */
assert(r);
uv__signal_control_handler_refs--;
}
static uv_err_t uv__signal_register(int signum) {
switch (signum) {
case SIGINT:
case SIGBREAK:
case SIGHUP:
return uv__signal_register_control_handler();
case SIGWINCH:
/* SIGWINCH is generated in tty.c. No need to register anything. */
return uv_ok_;
case SIGILL:
case SIGABRT_COMPAT:
case SIGFPE:
case SIGSEGV:
case SIGTERM:
case SIGABRT:
/* Signal is never raised. */
return uv_ok_;
default:
/* Invalid signal. */
return uv__new_artificial_error(UV_EINVAL);
}
}
static void uv__signal_unregister(int signum) {
switch (signum) {
case SIGINT:
case SIGBREAK:
case SIGHUP:
uv__signal_unregister_control_handler();
return;
case SIGWINCH:
/* SIGWINCH is generated in tty.c. No need to unregister anything. */
return;
case SIGILL:
case SIGABRT_COMPAT:
case SIGFPE:
case SIGSEGV:
case SIGTERM:
case SIGABRT:
/* Nothing is registered for this signal. */
return;
default:
/* Libuv bug. */
assert(0 && "Invalid signum");
return;
}
}
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
uv_req_t* req;
uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
handle->pending_signum = 0;
handle->signum = 0;
handle->signal_cb = NULL;
req = &handle->signal_req;
uv_req_init(loop, req);
req->type = UV_SIGNAL_REQ;
req->data = handle;
return 0;
}
int uv_signal_stop(uv_signal_t* handle) {
uv_signal_t* removed_handle;
/* If the watcher wasn't started, this is a no-op. */
if (handle->signum == 0)
return 0;
EnterCriticalSection(&uv__signal_lock);
uv__signal_unregister(handle->signum);
removed_handle = RB_REMOVE(uv_signal_tree_s, &uv__signal_tree, handle);
assert(removed_handle == handle);
LeaveCriticalSection(&uv__signal_lock);
handle->signum = 0;
uv__handle_stop(handle);
return 0;
}
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
uv_err_t err;
  /* If the user supplies signum == 0, return an error right away. If the */
/* signum is otherwise invalid then uv__signal_register will find out */
/* eventually. */
if (signum == 0) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
return -1;
}
/* Short circuit: if the signal watcher is already watching {signum} don't */
/* go through the process of deregistering and registering the handler. */
/* Additionally, this avoids pending signals getting lost in the (small) */
/* time frame that handle->signum == 0. */
if (signum == handle->signum) {
handle->signal_cb = signal_cb;
return 0;
}
/* If the signal handler was already active, stop it first. */
if (handle->signum != 0) {
int r = uv_signal_stop(handle);
/* uv_signal_stop is infallible. */
assert(r == 0);
}
EnterCriticalSection(&uv__signal_lock);
err = uv__signal_register(signum);
if (err.code != UV_OK) {
/* Uh-oh, didn't work. */
handle->loop->last_err = err;
LeaveCriticalSection(&uv__signal_lock);
return -1;
}
handle->signum = signum;
RB_INSERT(uv_signal_tree_s, &uv__signal_tree, handle);
LeaveCriticalSection(&uv__signal_lock);
handle->signal_cb = signal_cb;
uv__handle_start(handle);
return 0;
}
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req) {
unsigned long dispatched_signum;
assert(handle->type == UV_SIGNAL);
assert(req->type == UV_SIGNAL_REQ);
dispatched_signum = InterlockedExchange(&handle->pending_signum, 0);
assert(dispatched_signum != 0);
/* Check if the pending signal equals the signum that we are watching for. */
/* These can get out of sync when the handler is stopped and restarted */
/* while the signal_req is pending. */
if (dispatched_signum == handle->signum)
handle->signal_cb(handle, dispatched_signum);
if (handle->flags & UV__HANDLE_CLOSING) {
/* When it is closing, it must be stopped at this point. */
assert(handle->signum == 0);
uv_want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
uv_signal_stop(handle);
uv__handle_closing(handle);
if (handle->pending_signum == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
assert(handle->flags & UV__HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
assert(handle->signum == 0);
assert(handle->pending_signum == 0);
handle->flags |= UV_HANDLE_CLOSED;
uv__handle_close(handle);
}
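/* A minimal usage sketch, assuming the 0.10-era signal API defined above; the
 * callback signature matches the call in uv_process_signal_req (handle,
 * signum) and the names below are hypothetical. */
static void example_on_sigint(uv_signal_t* handle, int signum) {
  (void) handle;
  (void) signum;
  /* Dispatched from the console control handler via the loop's IOCP. */
}

static int example_watch_sigint(uv_loop_t* loop) {
  static uv_signal_t sig_handle;
  if (uv_signal_init(loop, &sig_handle) < 0)
    return -1;
  return uv_signal_start(&sig_handle, example_on_sigint, SIGINT);
}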

View File

@ -1,67 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_STREAM_INL_H_
#define UV_WIN_STREAM_INL_H_
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
INLINE static void uv_stream_init(uv_loop_t* loop,
uv_stream_t* handle,
uv_handle_type type) {
uv__handle_init(loop, (uv_handle_t*) handle, type);
handle->write_queue_size = 0;
handle->activecnt = 0;
}
INLINE static void uv_connection_init(uv_stream_t* handle) {
handle->flags |= UV_HANDLE_CONNECTION;
handle->write_reqs_pending = 0;
uv_req_init(handle->loop, (uv_req_t*) &(handle->read_req));
handle->read_req.event_handle = NULL;
handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
handle->read_req.type = UV_READ;
handle->read_req.data = handle;
handle->shutdown_req = NULL;
}
INLINE static size_t uv_count_bufs(uv_buf_t bufs[], int count) {
size_t bytes = 0;
int i;
for (i = 0; i < count; i++) {
bytes += (size_t)bufs[i].len;
}
return bytes;
}
#endif /* UV_WIN_STREAM_INL_H_ */

View File

@ -1,198 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
switch (stream->type) {
case UV_TCP:
return uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
case UV_NAMED_PIPE:
return uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
default:
assert(0);
return -1;
}
}
int uv_accept(uv_stream_t* server, uv_stream_t* client) {
switch (server->type) {
case UV_TCP:
return uv_tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client);
case UV_NAMED_PIPE:
return uv_pipe_accept((uv_pipe_t*)server, client);
default:
assert(0);
return -1;
}
}
int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
if (handle->flags & UV_HANDLE_READING) {
uv__set_artificial_error(handle->loop, UV_EALREADY);
return -1;
}
if (!(handle->flags & UV_HANDLE_READABLE)) {
uv__set_artificial_error(handle->loop, UV_ENOTCONN);
return -1;
}
switch (handle->type) {
case UV_TCP:
return uv_tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
case UV_NAMED_PIPE:
return uv_pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb);
case UV_TTY:
return uv_tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
default:
assert(0);
return -1;
}
}
int uv_read2_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
uv_read2_cb read_cb) {
if (handle->flags & UV_HANDLE_READING) {
uv__set_artificial_error(handle->loop, UV_EALREADY);
return -1;
}
if (!(handle->flags & UV_HANDLE_READABLE)) {
uv__set_artificial_error(handle->loop, UV_ENOTCONN);
return -1;
}
switch (handle->type) {
case UV_NAMED_PIPE:
return uv_pipe_read2_start((uv_pipe_t*)handle, alloc_cb, read_cb);
default:
assert(0);
return -1;
}
}
int uv_read_stop(uv_stream_t* handle) {
if (!(handle->flags & UV_HANDLE_READING))
return 0;
if (handle->type == UV_TTY) {
return uv_tty_read_stop((uv_tty_t*) handle);
} else {
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(handle->loop, handle);
return 0;
}
}
int uv_write(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[], int bufcnt,
uv_write_cb cb) {
uv_loop_t* loop = handle->loop;
if (!(handle->flags & UV_HANDLE_WRITABLE)) {
uv__set_artificial_error(loop, UV_EPIPE);
return -1;
}
switch (handle->type) {
case UV_TCP:
return uv_tcp_write(loop, req, (uv_tcp_t*) handle, bufs, bufcnt, cb);
case UV_NAMED_PIPE:
return uv_pipe_write(loop, req, (uv_pipe_t*) handle, bufs, bufcnt, cb);
case UV_TTY:
return uv_tty_write(loop, req, (uv_tty_t*) handle, bufs, bufcnt, cb);
default:
assert(0);
uv__set_sys_error(loop, WSAEINVAL);
return -1;
}
}
int uv_write2(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[], int bufcnt,
uv_stream_t* send_handle, uv_write_cb cb) {
uv_loop_t* loop = handle->loop;
if (!(handle->flags & UV_HANDLE_WRITABLE)) {
uv__set_artificial_error(loop, UV_EPIPE);
return -1;
}
switch (handle->type) {
case UV_NAMED_PIPE:
return uv_pipe_write2(loop, req, (uv_pipe_t*) handle, bufs, bufcnt, send_handle, cb);
default:
assert(0);
uv__set_sys_error(loop, WSAEINVAL);
return -1;
}
}
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
uv_loop_t* loop = handle->loop;
if (!(handle->flags & UV_HANDLE_WRITABLE)) {
uv__set_artificial_error(loop, UV_EPIPE);
return -1;
}
uv_req_init(loop, (uv_req_t*) req);
req->type = UV_SHUTDOWN;
req->handle = handle;
req->cb = cb;
handle->flags &= ~UV_HANDLE_WRITABLE;
handle->shutdown_req = req;
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_want_endgame(loop, (uv_handle_t*)handle);
return 0;
}
int uv_is_readable(const uv_stream_t* handle) {
return !!(handle->flags & UV_HANDLE_READABLE);
}
int uv_is_writable(const uv_stream_t* handle) {
return !!(handle->flags & UV_HANDLE_WRITABLE);
}
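/* A minimal usage sketch, assuming the 0.10-era uv_write()/uv_buf_init()
 * signatures used above; the stream, buffer contents and function names are
 * hypothetical. The dispatcher above routes the request to the TCP, pipe or
 * TTY implementation based on the handle type. */
static void example_on_write(uv_write_t* req, int status) {
  (void) req;
  (void) status;
}

static int example_write_hello(uv_stream_t* stream) {
  static uv_write_t write_req;
  uv_buf_t buf = uv_buf_init("hello\n", 6);
  return uv_write(&write_req, stream, &buf, 1, example_on_write);
}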

File diff suppressed because it is too large Load Diff

View File

@ -1,666 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <limits.h>
#include "uv.h"
#include "internal.h"
#define HAVE_SRWLOCK_API() (pTryAcquireSRWLockShared != NULL)
#define HAVE_CONDVAR_API() (pInitializeConditionVariable != NULL)
#ifdef _MSC_VER /* msvc */
# define inline __inline
# define NOINLINE __declspec (noinline)
#else /* gcc */
# define inline inline
# define NOINLINE __attribute__ ((noinline))
#endif
inline static int uv__rwlock_srwlock_init(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_destroy(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_rdlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_srwlock_tryrdlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_rdunlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_wrlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_srwlock_trywrlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock);
inline static int uv_cond_fallback_init(uv_cond_t* cond);
inline static void uv_cond_fallback_destroy(uv_cond_t* cond);
inline static void uv_cond_fallback_signal(uv_cond_t* cond);
inline static void uv_cond_fallback_broadcast(uv_cond_t* cond);
inline static void uv_cond_fallback_wait(uv_cond_t* cond, uv_mutex_t* mutex);
inline static int uv_cond_fallback_timedwait(uv_cond_t* cond,
uv_mutex_t* mutex, uint64_t timeout);
inline static int uv_cond_condvar_init(uv_cond_t* cond);
inline static void uv_cond_condvar_destroy(uv_cond_t* cond);
inline static void uv_cond_condvar_signal(uv_cond_t* cond);
inline static void uv_cond_condvar_broadcast(uv_cond_t* cond);
inline static void uv_cond_condvar_wait(uv_cond_t* cond, uv_mutex_t* mutex);
inline static int uv_cond_condvar_timedwait(uv_cond_t* cond,
uv_mutex_t* mutex, uint64_t timeout);
static NOINLINE void uv__once_inner(uv_once_t* guard,
void (*callback)(void)) {
DWORD result;
HANDLE existing_event, created_event;
created_event = CreateEvent(NULL, 1, 0, NULL);
if (created_event == 0) {
/* Could fail in a low-memory situation? */
uv_fatal_error(GetLastError(), "CreateEvent");
}
existing_event = InterlockedCompareExchangePointer(&guard->event,
created_event,
NULL);
if (existing_event == NULL) {
/* We won the race */
callback();
result = SetEvent(created_event);
assert(result);
guard->ran = 1;
} else {
/* We lost the race. Destroy the event we created and wait for the */
    /* existing one to become signaled. */
CloseHandle(created_event);
result = WaitForSingleObject(existing_event, INFINITE);
assert(result == WAIT_OBJECT_0);
}
}
void uv_once(uv_once_t* guard, void (*callback)(void)) {
/* Fast case - avoid WaitForSingleObject. */
if (guard->ran) {
return;
}
uv__once_inner(guard, callback);
}
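/* A minimal usage sketch of the run-exactly-once pattern served by uv_once()
 * above; the guard and function names are hypothetical. */
static uv_once_t example_init_guard = UV_ONCE_INIT;

static void example_init_once(void) {
  /* One-time global initialization goes here; racing threads block in */
  /* uv__once_inner() until the winner's callback has finished. */
}

static void example_use(void) {
  uv_once(&example_init_guard, example_init_once);
}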
int uv_thread_join(uv_thread_t *tid) {
if (WaitForSingleObject(*tid, INFINITE))
return -1;
else {
CloseHandle(*tid);
*tid = 0;
return 0;
}
}
int uv_mutex_init(uv_mutex_t* mutex) {
InitializeCriticalSection(mutex);
return 0;
}
void uv_mutex_destroy(uv_mutex_t* mutex) {
DeleteCriticalSection(mutex);
}
void uv_mutex_lock(uv_mutex_t* mutex) {
EnterCriticalSection(mutex);
}
int uv_mutex_trylock(uv_mutex_t* mutex) {
if (TryEnterCriticalSection(mutex))
return 0;
else
return -1;
}
void uv_mutex_unlock(uv_mutex_t* mutex) {
LeaveCriticalSection(mutex);
}
int uv_rwlock_init(uv_rwlock_t* rwlock) {
uv__once_init();
if (HAVE_SRWLOCK_API())
return uv__rwlock_srwlock_init(rwlock);
else
return uv__rwlock_fallback_init(rwlock);
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_destroy(rwlock);
else
uv__rwlock_fallback_destroy(rwlock);
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_rdlock(rwlock);
else
uv__rwlock_fallback_rdlock(rwlock);
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
return uv__rwlock_srwlock_tryrdlock(rwlock);
else
return uv__rwlock_fallback_tryrdlock(rwlock);
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_rdunlock(rwlock);
else
uv__rwlock_fallback_rdunlock(rwlock);
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_wrlock(rwlock);
else
uv__rwlock_fallback_wrlock(rwlock);
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
return uv__rwlock_srwlock_trywrlock(rwlock);
else
return uv__rwlock_fallback_trywrlock(rwlock);
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_wrunlock(rwlock);
else
uv__rwlock_fallback_wrunlock(rwlock);
}
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
*sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
return *sem ? 0 : -1;
}
void uv_sem_destroy(uv_sem_t* sem) {
if (!CloseHandle(*sem))
abort();
}
void uv_sem_post(uv_sem_t* sem) {
if (!ReleaseSemaphore(*sem, 1, NULL))
abort();
}
void uv_sem_wait(uv_sem_t* sem) {
if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)
abort();
}
int uv_sem_trywait(uv_sem_t* sem) {
DWORD r = WaitForSingleObject(*sem, 0);
if (r == WAIT_OBJECT_0)
return 0;
if (r == WAIT_TIMEOUT)
return -1;
abort();
return -1; /* Satisfy the compiler. */
}
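/*
 * Editor's sketch (not part of the original file): using the semaphore as a
 * "work available" counter between two threads. uv_sem_init(&items, 0) is
 * assumed to have run first; its second argument is the initial count.
 */
static uv_sem_t items;

static void produce_item(void) {
  /* ... publish an item somewhere ... */
  uv_sem_post(&items);             /* wake one waiting consumer */
}

static void consume_item(void) {
  uv_sem_wait(&items);             /* blocks until produce_item() has run */
  /* ... take the published item ... */
}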
inline static int uv__rwlock_srwlock_init(uv_rwlock_t* rwlock) {
pInitializeSRWLock(&rwlock->srwlock_);
return 0;
}
inline static void uv__rwlock_srwlock_destroy(uv_rwlock_t* rwlock) {
(void) rwlock;
}
inline static void uv__rwlock_srwlock_rdlock(uv_rwlock_t* rwlock) {
pAcquireSRWLockShared(&rwlock->srwlock_);
}
inline static int uv__rwlock_srwlock_tryrdlock(uv_rwlock_t* rwlock) {
if (pTryAcquireSRWLockShared(&rwlock->srwlock_))
return 0;
else
return -1;
}
inline static void uv__rwlock_srwlock_rdunlock(uv_rwlock_t* rwlock) {
pReleaseSRWLockShared(&rwlock->srwlock_);
}
inline static void uv__rwlock_srwlock_wrlock(uv_rwlock_t* rwlock) {
pAcquireSRWLockExclusive(&rwlock->srwlock_);
}
inline static int uv__rwlock_srwlock_trywrlock(uv_rwlock_t* rwlock) {
if (pTryAcquireSRWLockExclusive(&rwlock->srwlock_))
return 0;
else
return -1;
}
inline static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock) {
pReleaseSRWLockExclusive(&rwlock->srwlock_);
}
inline static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
if (uv_mutex_init(&rwlock->fallback_.read_mutex_))
return -1;
if (uv_mutex_init(&rwlock->fallback_.write_mutex_)) {
uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
return -1;
}
rwlock->fallback_.num_readers_ = 0;
return 0;
}
inline static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock) {
uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
uv_mutex_destroy(&rwlock->fallback_.write_mutex_);
}
inline static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
uv_mutex_lock(&rwlock->fallback_.read_mutex_);
if (++rwlock->fallback_.num_readers_ == 1)
uv_mutex_lock(&rwlock->fallback_.write_mutex_);
uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
}
inline static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock) {
int ret;
ret = -1;
if (uv_mutex_trylock(&rwlock->fallback_.read_mutex_))
goto out;
if (rwlock->fallback_.num_readers_ == 0)
ret = uv_mutex_trylock(&rwlock->fallback_.write_mutex_);
else
ret = 0;
if (ret == 0)
rwlock->fallback_.num_readers_++;
uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
out:
return ret;
}
inline static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
uv_mutex_lock(&rwlock->fallback_.read_mutex_);
if (--rwlock->fallback_.num_readers_ == 0)
uv_mutex_unlock(&rwlock->fallback_.write_mutex_);
uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
}
inline static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock) {
uv_mutex_lock(&rwlock->fallback_.write_mutex_);
}
inline static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock) {
return uv_mutex_trylock(&rwlock->fallback_.write_mutex_);
}
inline static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock) {
uv_mutex_unlock(&rwlock->fallback_.write_mutex_);
}
/* This condition variable implementation is based on the SetEvent solution
* (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
* We could not use the SignalObjectAndWait solution (section 3.4) because
 * it wants the 2nd argument (type uv_mutex_t) of uv_cond_wait() and
* uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs.
*/
inline static int uv_cond_fallback_init(uv_cond_t* cond) {
/* Initialize the count to 0. */
cond->fallback.waiters_count = 0;
InitializeCriticalSection(&cond->fallback.waiters_count_lock);
/* Create an auto-reset event. */
cond->fallback.signal_event = CreateEvent(NULL, /* no security */
FALSE, /* auto-reset event */
FALSE, /* non-signaled initially */
NULL); /* unnamed */
if (!cond->fallback.signal_event)
goto error2;
/* Create a manual-reset event. */
cond->fallback.broadcast_event = CreateEvent(NULL, /* no security */
TRUE, /* manual-reset */
FALSE, /* non-signaled */
NULL); /* unnamed */
if (!cond->fallback.broadcast_event)
goto error;
return 0;
error:
CloseHandle(cond->fallback.signal_event);
error2:
DeleteCriticalSection(&cond->fallback.waiters_count_lock);
return -1;
}
inline static int uv_cond_condvar_init(uv_cond_t* cond) {
pInitializeConditionVariable(&cond->cond_var);
return 0;
}
int uv_cond_init(uv_cond_t* cond) {
uv__once_init();
if (HAVE_CONDVAR_API())
return uv_cond_condvar_init(cond);
else
return uv_cond_fallback_init(cond);
}
inline static void uv_cond_fallback_destroy(uv_cond_t* cond) {
if (!CloseHandle(cond->fallback.broadcast_event))
abort();
if (!CloseHandle(cond->fallback.signal_event))
abort();
DeleteCriticalSection(&cond->fallback.waiters_count_lock);
}
inline static void uv_cond_condvar_destroy(uv_cond_t* cond) {
/* nothing to do */
}
void uv_cond_destroy(uv_cond_t* cond) {
if (HAVE_CONDVAR_API())
uv_cond_condvar_destroy(cond);
else
uv_cond_fallback_destroy(cond);
}
inline static void uv_cond_fallback_signal(uv_cond_t* cond) {
int have_waiters;
/* Avoid race conditions. */
EnterCriticalSection(&cond->fallback.waiters_count_lock);
have_waiters = cond->fallback.waiters_count > 0;
LeaveCriticalSection(&cond->fallback.waiters_count_lock);
if (have_waiters)
SetEvent(cond->fallback.signal_event);
}
inline static void uv_cond_condvar_signal(uv_cond_t* cond) {
pWakeConditionVariable(&cond->cond_var);
}
void uv_cond_signal(uv_cond_t* cond) {
if (HAVE_CONDVAR_API())
uv_cond_condvar_signal(cond);
else
uv_cond_fallback_signal(cond);
}
inline static void uv_cond_fallback_broadcast(uv_cond_t* cond) {
int have_waiters;
/* Avoid race conditions. */
EnterCriticalSection(&cond->fallback.waiters_count_lock);
have_waiters = cond->fallback.waiters_count > 0;
LeaveCriticalSection(&cond->fallback.waiters_count_lock);
if (have_waiters)
SetEvent(cond->fallback.broadcast_event);
}
inline static void uv_cond_condvar_broadcast(uv_cond_t* cond) {
pWakeAllConditionVariable(&cond->cond_var);
}
void uv_cond_broadcast(uv_cond_t* cond) {
if (HAVE_CONDVAR_API())
uv_cond_condvar_broadcast(cond);
else
uv_cond_fallback_broadcast(cond);
}
inline int uv_cond_wait_helper(uv_cond_t* cond, uv_mutex_t* mutex,
DWORD dwMilliseconds) {
DWORD result;
int last_waiter;
HANDLE handles[2] = {
cond->fallback.signal_event,
cond->fallback.broadcast_event
};
/* Avoid race conditions. */
EnterCriticalSection(&cond->fallback.waiters_count_lock);
cond->fallback.waiters_count++;
LeaveCriticalSection(&cond->fallback.waiters_count_lock);
/* It's ok to release the <mutex> here since Win32 manual-reset events */
/* maintain state when used with <SetEvent>. This avoids the "lost wakeup" */
/* bug. */
uv_mutex_unlock(mutex);
/* Wait for either event to become signaled due to <uv_cond_signal> being */
/* called or <uv_cond_broadcast> being called. */
result = WaitForMultipleObjects(2, handles, FALSE, dwMilliseconds);
EnterCriticalSection(&cond->fallback.waiters_count_lock);
cond->fallback.waiters_count--;
last_waiter = result == WAIT_OBJECT_0 + 1
&& cond->fallback.waiters_count == 0;
LeaveCriticalSection(&cond->fallback.waiters_count_lock);
/* Some thread called <uv_cond_broadcast>. */
if (last_waiter) {
/* We're the last waiter to be notified or to stop waiting, so reset the */
/* manual-reset event. */
ResetEvent(cond->fallback.broadcast_event);
}
/* Reacquire the <mutex>. */
uv_mutex_lock(mutex);
if (result == WAIT_OBJECT_0 || result == WAIT_OBJECT_0 + 1)
return 0;
if (result == WAIT_TIMEOUT)
return -1;
abort();
return -1; /* Satisfy the compiler. */
}
inline static void uv_cond_fallback_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
if (uv_cond_wait_helper(cond, mutex, INFINITE))
abort();
}
inline static void uv_cond_condvar_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
if (!pSleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))
abort();
}
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
if (HAVE_CONDVAR_API())
uv_cond_condvar_wait(cond, mutex);
else
uv_cond_fallback_wait(cond, mutex);
}
inline static int uv_cond_fallback_timedwait(uv_cond_t* cond,
uv_mutex_t* mutex, uint64_t timeout) {
return uv_cond_wait_helper(cond, mutex, (DWORD)(timeout / 1e6));
}
inline static int uv_cond_condvar_timedwait(uv_cond_t* cond,
uv_mutex_t* mutex, uint64_t timeout) {
if (pSleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
return 0;
if (GetLastError() != ERROR_TIMEOUT)
abort();
return -1;
}
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex,
uint64_t timeout) {
if (HAVE_CONDVAR_API())
return uv_cond_condvar_timedwait(cond, mutex, timeout);
else
return uv_cond_fallback_timedwait(cond, mutex, timeout);
}
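/*
 * Editor's sketch (not part of the original file): the canonical condition
 * variable pattern for this API. The predicate is re-checked in a loop since
 * both the fallback and the native paths allow spurious or stolen wakeups.
 * Note that uv_cond_timedwait() takes its timeout in nanoseconds (hence the
 * division by 1e6 above to get milliseconds). uv_mutex_init()/uv_cond_init()
 * are assumed to have run first; the names are illustrative only.
 */
static uv_mutex_t queue_lock;
static uv_cond_t queue_not_empty;
static int queue_len;

static void push_item(void) {
  uv_mutex_lock(&queue_lock);
  queue_len++;
  uv_cond_signal(&queue_not_empty);
  uv_mutex_unlock(&queue_lock);
}

static void pop_item(void) {
  uv_mutex_lock(&queue_lock);
  while (queue_len == 0)
    uv_cond_wait(&queue_not_empty, &queue_lock);
  queue_len--;
  uv_mutex_unlock(&queue_lock);
}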
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
barrier->n = count;
barrier->count = 0;
if (uv_mutex_init(&barrier->mutex))
return -1;
if (uv_sem_init(&barrier->turnstile1, 0))
goto error2;
if (uv_sem_init(&barrier->turnstile2, 1))
goto error;
return 0;
error:
uv_sem_destroy(&barrier->turnstile1);
error2:
uv_mutex_destroy(&barrier->mutex);
return -1;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
uv_sem_destroy(&barrier->turnstile2);
uv_sem_destroy(&barrier->turnstile1);
uv_mutex_destroy(&barrier->mutex);
}
void uv_barrier_wait(uv_barrier_t* barrier) {
uv_mutex_lock(&barrier->mutex);
if (++barrier->count == barrier->n) {
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile1);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile1);
uv_mutex_lock(&barrier->mutex);
if (--barrier->count == 0) {
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile2);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile2);
}
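/*
 * Editor's sketch (not part of the original file): a barrier rendezvous for
 * N threads. Every thread blocks in uv_barrier_wait() until the N-th arrival,
 * then all are released together. uv_thread_create() is the public libuv
 * thread-spawning call that pairs with uv_thread_join() above; the worker
 * names are illustrative only.
 */
#define NUM_WORKERS 4

static uv_barrier_t start_barrier;

static void worker(void* arg) {
  /* ... per-thread setup ... */
  uv_barrier_wait(&start_barrier);   /* nobody proceeds until all 4 arrive */
  /* ... work that requires every worker to be initialized ... */
}

static void run_workers(void) {
  uv_thread_t threads[NUM_WORKERS];
  int i;
  uv_barrier_init(&start_barrier, NUM_WORKERS);
  for (i = 0; i < NUM_WORKERS; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < NUM_WORKERS; i++)
    uv_thread_join(&threads[i]);
  uv_barrier_destroy(&start_barrier);
}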

View File

@ -1,82 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "req-inl.h"
static void uv_work_req_init(uv_loop_t* loop, uv_work_t* req,
uv_work_cb work_cb, uv_after_work_cb after_work_cb) {
uv_req_init(loop, (uv_req_t*) req);
req->type = UV_WORK;
req->loop = loop;
req->work_cb = work_cb;
req->after_work_cb = after_work_cb;
memset(&req->overlapped, 0, sizeof(req->overlapped));
}
static DWORD WINAPI uv_work_thread_proc(void* parameter) {
uv_work_t* req = (uv_work_t*)parameter;
uv_loop_t* loop = req->loop;
assert(req != NULL);
assert(req->type == UV_WORK);
assert(req->work_cb);
req->work_cb(req);
POST_COMPLETION_FOR_REQ(loop, req);
return 0;
}
int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
uv_after_work_cb after_work_cb) {
if (work_cb == NULL)
return uv__set_artificial_error(loop, UV_EINVAL);
uv_work_req_init(loop, req, work_cb, after_work_cb);
if (!QueueUserWorkItem(&uv_work_thread_proc, req, WT_EXECUTELONGFUNCTION)) {
uv__set_sys_error(loop, GetLastError());
return -1;
}
uv__req_register(loop, req);
return 0;
}
int uv_cancel(uv_req_t* req) {
return -1;
}
void uv_process_work_req(uv_loop_t* loop, uv_work_t* req) {
uv__req_unregister(loop, req);
if(req->after_work_cb)
req->after_work_cb(req, 0);
}
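/*
 * Editor's sketch (not part of the original file): queueing work onto the
 * Windows thread pool via this file's uv_queue_work(). work_cb runs on a
 * QueueUserWorkItem thread; after_work_cb runs later on the loop thread,
 * matching the req->after_work_cb(req, 0) call in uv_process_work_req().
 * Loop setup and running are assumed to happen elsewhere.
 */
static void crunch(uv_work_t* req) {
  /* Runs on a pool thread; must not touch loop-owned state. */
}

static void crunch_done(uv_work_t* req, int status) {
  /* Runs on the event loop thread once the completion is processed. */
}

static void queue_crunch(uv_loop_t* loop) {
  static uv_work_t req;   /* must stay valid until crunch_done() has run */
  uv_queue_work(loop, &req, crunch, crunch_done);
}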

View File

@ -1,236 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <limits.h>
#include "uv.h"
#include "internal.h"
#include "tree.h"
#include "handle-inl.h"
void uv_update_time(uv_loop_t* loop) {
DWORD ticks = GetTickCount();
/* The assumption is made that LARGE_INTEGER.QuadPart has the same type as */
/* loop->time, which happens to be the case. Is there any way to assert this? */
LARGE_INTEGER* time = (LARGE_INTEGER*) &loop->time;
/* If the timer has wrapped, add 1 to its high-order dword. */
/* uv_poll must make sure that the timer can never overflow more than */
/* once between two subsequent uv_update_time calls. */
if (ticks < time->LowPart) {
time->HighPart += 1;
}
time->LowPart = ticks;
}
static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
if (a->due < b->due)
return -1;
if (a->due > b->due)
return 1;
/*
* compare start_id when both have the same due. start_id is
* allocated with loop->timer_counter in uv_timer_start().
*/
if (a->start_id < b->start_id)
return -1;
if (a->start_id > b->start_id)
return 1;
return 0;
}
RB_GENERATE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_TIMER);
handle->timer_cb = NULL;
handle->repeat = 0;
return 0;
}
void uv_timer_endgame(uv_loop_t* loop, uv_timer_t* handle) {
if (handle->flags & UV__HANDLE_CLOSING) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
}
static uint64_t get_clamped_due_time(uint64_t loop_time, uint64_t timeout) {
uint64_t clamped_timeout;
clamped_timeout = loop_time + timeout;
if (clamped_timeout < timeout)
clamped_timeout = (uint64_t) -1;
return clamped_timeout;
}
int uv_timer_start(uv_timer_t* handle, uv_timer_cb timer_cb, uint64_t timeout,
uint64_t repeat) {
uv_loop_t* loop = handle->loop;
uv_timer_t* old;
if (handle->flags & UV_HANDLE_ACTIVE) {
RB_REMOVE(uv_timer_tree_s, &loop->timers, handle);
}
handle->timer_cb = timer_cb;
handle->due = get_clamped_due_time(loop->time, timeout);
handle->repeat = repeat;
handle->flags |= UV_HANDLE_ACTIVE;
uv__handle_start(handle);
/* start_id is the second index to be compared in uv_timer_compare() */
handle->start_id = handle->loop->timer_counter++;
old = RB_INSERT(uv_timer_tree_s, &loop->timers, handle);
assert(old == NULL);
return 0;
}
int uv_timer_stop(uv_timer_t* handle) {
uv_loop_t* loop = handle->loop;
if (!(handle->flags & UV_HANDLE_ACTIVE))
return 0;
RB_REMOVE(uv_timer_tree_s, &loop->timers, handle);
handle->flags &= ~UV_HANDLE_ACTIVE;
uv__handle_stop(handle);
return 0;
}
int uv_timer_again(uv_timer_t* handle) {
uv_loop_t* loop = handle->loop;
/* If timer_cb is NULL that means that the timer was never started. */
if (!handle->timer_cb) {
uv__set_sys_error(loop, ERROR_INVALID_DATA);
return -1;
}
if (handle->flags & UV_HANDLE_ACTIVE) {
RB_REMOVE(uv_timer_tree_s, &loop->timers, handle);
handle->flags &= ~UV_HANDLE_ACTIVE;
uv__handle_stop(handle);
}
if (handle->repeat) {
handle->due = get_clamped_due_time(loop->time, handle->repeat);
if (RB_INSERT(uv_timer_tree_s, &loop->timers, handle) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
handle->flags |= UV_HANDLE_ACTIVE;
uv__handle_start(handle);
}
return 0;
}
void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
assert(handle->type == UV_TIMER);
handle->repeat = repeat;
}
uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
assert(handle->type == UV_TIMER);
return handle->repeat;
}
DWORD uv_get_poll_timeout(uv_loop_t* loop) {
uv_timer_t* timer;
int64_t delta;
/* Check if there are any running timers */
timer = RB_MIN(uv_timer_tree_s, &loop->timers);
if (timer) {
uv_update_time(loop);
delta = timer->due - loop->time;
if (delta >= UINT_MAX >> 1) {
/* A timeout value of UINT_MAX means infinite, so that's no good. But */
/* more importantly, there's always the risk that GetTickCount wraps. */
/* uv_update_time can detect this, but we must make sure that the */
/* tick counter never overflows twice between two subsequent */
/* uv_update_time calls. We do this by never sleeping more than half */
/* the time it takes to wrap the counter - which is huge overkill, */
/* but hey, it's not so bad to wake up every 25 days. */
return UINT_MAX >> 1;
} else if (delta < 0) {
/* Negative timeout values are not allowed */
return 0;
} else {
return (DWORD)delta;
}
} else {
/* No timers */
return INFINITE;
}
}
void uv_process_timers(uv_loop_t* loop) {
uv_timer_t* timer;
/* Call timer callbacks */
for (timer = RB_MIN(uv_timer_tree_s, &loop->timers);
timer != NULL && timer->due <= loop->time;
timer = RB_MIN(uv_timer_tree_s, &loop->timers)) {
RB_REMOVE(uv_timer_tree_s, &loop->timers, timer);
if (timer->repeat != 0) {
/* If it is a repeating timer, reschedule with repeat timeout. */
timer->due = get_clamped_due_time(timer->due, timer->repeat);
if (timer->due < loop->time) {
timer->due = loop->time;
}
if (RB_INSERT(uv_timer_tree_s, &loop->timers, timer) != NULL) {
uv_fatal_error(ERROR_INVALID_DATA, "RB_INSERT");
}
} else {
/* If non-repeating, mark the timer as inactive. */
timer->flags &= ~UV_HANDLE_ACTIVE;
uv__handle_stop(timer);
}
timer->timer_cb((uv_timer_t*) timer, 0);
}
}
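/*
 * Editor's sketch (not part of the original file): driving this timer
 * implementation. Timeout and repeat are in milliseconds of loop time, and
 * the callback signature matches the timer->timer_cb(handle, 0) call above.
 * The loop is assumed to be created and run elsewhere.
 */
static void on_tick(uv_timer_t* handle, int status) {
  /* Fires roughly every 500 ms until uv_timer_stop(handle) is called. */
}

static void start_ticker(uv_loop_t* loop) {
  static uv_timer_t ticker;
  uv_timer_init(loop, &ticker);
  uv_timer_start(&ticker, on_tick, 500, 500);  /* first after 500 ms, then repeating */
}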

File diff suppressed because it is too large

View File

@ -1,743 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "stream-inl.h"
#include "req-inl.h"
/*
* Threshold of active udp streams for which to preallocate udp read buffers.
*/
const unsigned int uv_active_udp_streams_threshold = 0;
/* A zero-size buffer for use by uv_udp_read */
static char uv_zero_[] = "";
int uv_udp_getsockname(uv_udp_t* handle, struct sockaddr* name,
int* namelen) {
uv_loop_t* loop = handle->loop;
int result;
if (!(handle->flags & UV_HANDLE_BOUND)) {
uv__set_sys_error(loop, WSAEINVAL);
return -1;
}
result = getsockname(handle->socket, name, namelen);
if (result != 0) {
uv__set_sys_error(loop, WSAGetLastError());
return -1;
}
return 0;
}
static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
int family) {
DWORD yes = 1;
WSAPROTOCOL_INFOW info;
int opt_len;
assert(handle->socket == INVALID_SOCKET);
/* Set SO_REUSEADDR on the socket. */
if (setsockopt(socket,
SOL_SOCKET,
SO_REUSEADDR,
(char*) &yes,
sizeof yes) == SOCKET_ERROR) {
uv__set_sys_error(loop, WSAGetLastError());
return -1;
}
/* Set the socket to nonblocking mode */
if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
uv__set_sys_error(loop, WSAGetLastError());
return -1;
}
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
uv__set_sys_error(loop, GetLastError());
return -1;
}
/* Associate it with the I/O completion port. */
/* Use uv_handle_t pointer as completion key. */
if (CreateIoCompletionPort((HANDLE)socket,
loop->iocp,
(ULONG_PTR)socket,
0) == NULL) {
uv__set_sys_error(loop, GetLastError());
return -1;
}
if (pSetFileCompletionNotificationModes) {
/* All known Windows versions that support SetFileCompletionNotificationModes */
/* have a bug that makes it impossible to use this function in */
/* conjunction with datagram sockets. We can work around that but only */
/* if the user is using the default UDP driver (AFD) and has no other */
/* LSPs stacked on top. Here we check whether that is the case. */
opt_len = (int) sizeof info;
if (getsockopt(socket,
SOL_SOCKET,
SO_PROTOCOL_INFOW,
(char*) &info,
&opt_len) == SOCKET_ERROR) {
uv__set_sys_error(loop, GetLastError());
return -1;
}
if (info.ProtocolChain.ChainLen == 1) {
if (pSetFileCompletionNotificationModes((HANDLE)socket,
FILE_SKIP_SET_EVENT_ON_HANDLE |
FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
handle->func_wsarecv = uv_wsarecv_workaround;
handle->func_wsarecvfrom = uv_wsarecvfrom_workaround;
} else if (GetLastError() != ERROR_INVALID_FUNCTION) {
uv__set_sys_error(loop, GetLastError());
return -1;
}
}
}
handle->socket = socket;
if (family == AF_INET6) {
handle->flags |= UV_HANDLE_IPV6;
} else {
assert(!(handle->flags & UV_HANDLE_IPV6));
}
return 0;
}
int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
handle->socket = INVALID_SOCKET;
handle->reqs_pending = 0;
handle->activecnt = 0;
handle->func_wsarecv = WSARecv;
handle->func_wsarecvfrom = WSARecvFrom;
uv_req_init(loop, (uv_req_t*) &(handle->recv_req));
handle->recv_req.type = UV_UDP_RECV;
handle->recv_req.data = handle;
return 0;
}
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
uv_udp_recv_stop(handle);
closesocket(handle->socket);
uv__handle_closing(handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
if (handle->flags & UV__HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
}
static int uv__bind(uv_udp_t* handle,
int family,
struct sockaddr* addr,
int addrsize,
unsigned int flags) {
int r;
DWORD no = 0;
if ((flags & UV_UDP_IPV6ONLY) && family != AF_INET6) {
/* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */
uv__set_artificial_error(handle->loop, UV_EINVAL);
return -1;
}
if (handle->socket == INVALID_SOCKET) {
SOCKET sock = socket(family, SOCK_DGRAM, 0);
if (sock == INVALID_SOCKET) {
uv__set_sys_error(handle->loop, WSAGetLastError());
return -1;
}
if (uv_udp_set_socket(handle->loop, handle, sock, family) < 0) {
closesocket(sock);
return -1;
}
if (family == AF_INET6)
handle->flags |= UV_HANDLE_IPV6;
}
if (family == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) {
/* On windows IPV6ONLY is on by default. */
/* If the user doesn't specify it libuv turns it off. */
/* TODO: how to handle errors? This may fail if there is no ipv4 stack */
/* available, or when run on XP/2003 which have no support for dualstack */
/* sockets. For now we're silently ignoring the error. */
setsockopt(handle->socket,
IPPROTO_IPV6,
IPV6_V6ONLY,
(char*) &no,
sizeof no);
}
r = bind(handle->socket, addr, addrsize);
if (r == SOCKET_ERROR) {
uv__set_sys_error(handle->loop, WSAGetLastError());
return -1;
}
handle->flags |= UV_HANDLE_BOUND;
return 0;
}
int uv__udp_bind(uv_udp_t* handle, struct sockaddr_in addr,
unsigned int flags) {
return uv__bind(handle,
AF_INET,
(struct sockaddr*) &addr,
sizeof(struct sockaddr_in),
flags);
}
int uv__udp_bind6(uv_udp_t* handle, struct sockaddr_in6 addr,
unsigned int flags) {
return uv__bind(handle,
AF_INET6,
(struct sockaddr*) &addr,
sizeof(struct sockaddr_in6),
flags);
}
static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
uv_req_t* req;
uv_buf_t buf;
DWORD bytes, flags;
int result;
assert(handle->flags & UV_HANDLE_READING);
assert(!(handle->flags & UV_HANDLE_READ_PENDING));
req = &handle->recv_req;
memset(&req->overlapped, 0, sizeof(req->overlapped));
/*
* Preallocate a read buffer if the number of active streams is below
* the threshold.
*/
if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
handle->flags &= ~UV_HANDLE_ZERO_READ;
handle->recv_buffer = handle->alloc_cb((uv_handle_t*) handle, 65536);
assert(handle->recv_buffer.len > 0);
buf = handle->recv_buffer;
memset(&handle->recv_from, 0, sizeof handle->recv_from);
handle->recv_from_len = sizeof handle->recv_from;
flags = 0;
result = handle->func_wsarecvfrom(handle->socket,
(WSABUF*) &buf,
1,
&bytes,
&flags,
(struct sockaddr*) &handle->recv_from,
&handle->recv_from_len,
&req->overlapped,
NULL);
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
req->overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
handle->reqs_pending++;
}
} else {
handle->flags |= UV_HANDLE_ZERO_READ;
buf.base = (char*) uv_zero_;
buf.len = 0;
flags = MSG_PEEK;
result = handle->func_wsarecv(handle->socket,
(WSABUF*) &buf,
1,
&bytes,
&flags,
&req->overlapped,
NULL);
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
req->overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
handle->reqs_pending++;
}
}
}
int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb) {
uv_loop_t* loop = handle->loop;
if (handle->flags & UV_HANDLE_READING) {
uv__set_sys_error(loop, WSAEALREADY);
return -1;
}
if (!(handle->flags & UV_HANDLE_BOUND) &&
uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) {
return -1;
}
handle->flags |= UV_HANDLE_READING;
INCREASE_ACTIVE_COUNT(loop, handle);
loop->active_udp_streams++;
handle->recv_cb = recv_cb;
handle->alloc_cb = alloc_cb;
/* If reading was stopped and then started again, there could still be a */
/* recv request pending. */
if (!(handle->flags & UV_HANDLE_READ_PENDING))
uv_udp_queue_recv(loop, handle);
return 0;
}
int uv__udp_recv_stop(uv_udp_t* handle) {
if (handle->flags & UV_HANDLE_READING) {
handle->flags &= ~UV_HANDLE_READING;
handle->loop->active_udp_streams--;
DECREASE_ACTIVE_COUNT(loop, handle);
}
return 0;
}
static int uv__send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t bufs[],
int bufcnt, struct sockaddr* addr, int addr_len, uv_udp_send_cb cb) {
uv_loop_t* loop = handle->loop;
DWORD result, bytes;
uv_req_init(loop, (uv_req_t*) req);
req->type = UV_UDP_SEND;
req->handle = handle;
req->cb = cb;
memset(&req->overlapped, 0, sizeof(req->overlapped));
result = WSASendTo(handle->socket,
(WSABUF*)bufs,
bufcnt,
&bytes,
0,
addr,
addr_len,
&req->overlapped,
NULL);
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Request completed immediately. */
req->queued_bytes = 0;
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* Request queued by the kernel. */
req->queued_bytes = uv_count_bufs(bufs, bufcnt);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
} else {
/* Send failed due to an error. */
uv__set_sys_error(loop, WSAGetLastError());
return -1;
}
return 0;
}
int uv__udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t bufs[],
int bufcnt, struct sockaddr_in addr, uv_udp_send_cb cb) {
if (!(handle->flags & UV_HANDLE_BOUND) &&
uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) {
return -1;
}
return uv__send(req,
handle,
bufs,
bufcnt,
(struct sockaddr*) &addr,
sizeof addr,
cb);
}
int uv__udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t bufs[],
int bufcnt, struct sockaddr_in6 addr, uv_udp_send_cb cb) {
if (!(handle->flags & UV_HANDLE_BOUND) &&
uv_udp_bind6(handle, uv_addr_ip6_any_, 0) < 0) {
return -1;
}
return uv__send(req,
handle,
bufs,
bufcnt,
(struct sockaddr*) &addr,
sizeof addr,
cb);
}
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
uv_req_t* req) {
uv_buf_t buf;
int partial;
assert(handle->type == UV_UDP);
handle->flags &= ~UV_HANDLE_READ_PENDING;
if (!REQ_SUCCESS(req)) {
DWORD err = GET_REQ_SOCK_ERROR(req);
if (err == WSAEMSGSIZE) {
/* Not a real error, it just indicates that the received packet */
/* was bigger than the receive buffer. */
} else if (err == WSAECONNRESET || err == WSAENETRESET) {
/* A previous sendto operation failed; ignore this error. If */
/* zero-reading we need to call WSARecv/WSARecvFrom _without_ the */
/* MSG_PEEK flag to clear out the error queue. For nonzero reads, */
/* immediately queue a new receive. */
if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
goto done;
}
} else {
/* A real error occurred. Report the error to the user only if we're */
/* currently reading. */
if (handle->flags & UV_HANDLE_READING) {
uv__set_sys_error(loop, err);
uv_udp_recv_stop(handle);
buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
uv_buf_init(NULL, 0) : handle->recv_buffer;
handle->recv_cb(handle, -1, buf, NULL, 0);
}
goto done;
}
}
if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
/* Successful read */
partial = !REQ_SUCCESS(req);
handle->recv_cb(handle,
req->overlapped.InternalHigh,
handle->recv_buffer,
(struct sockaddr*) &handle->recv_from,
partial ? UV_UDP_PARTIAL : 0);
} else if (handle->flags & UV_HANDLE_READING) {
DWORD bytes, err, flags;
struct sockaddr_storage from;
int from_len;
/* Do a nonblocking receive */
/* TODO: try to read multiple datagrams at once. FIONREAD maybe? */
buf = handle->alloc_cb((uv_handle_t*) handle, 65536);
assert(buf.len > 0);
memset(&from, 0, sizeof from);
from_len = sizeof from;
flags = 0;
if (WSARecvFrom(handle->socket,
(WSABUF*)&buf,
1,
&bytes,
&flags,
(struct sockaddr*) &from,
&from_len,
NULL,
NULL) != SOCKET_ERROR) {
/* Message received */
handle->recv_cb(handle, bytes, buf, (struct sockaddr*) &from, 0);
} else {
err = WSAGetLastError();
if (err == WSAEMSGSIZE) {
/* Message truncated */
handle->recv_cb(handle,
bytes,
buf,
(struct sockaddr*) &from,
UV_UDP_PARTIAL);
} else if (err == WSAEWOULDBLOCK) {
/* Kernel buffer empty */
uv__set_sys_error(loop, WSAEWOULDBLOCK);
handle->recv_cb(handle, 0, buf, NULL, 0);
} else if (err != WSAECONNRESET && err != WSAENETRESET) {
/* Serious error. WSAECONNRESET/WSANETRESET is ignored because this */
/* just indicates that a previous sendto operation failed. */
uv_udp_recv_stop(handle);
uv__set_sys_error(loop, err);
handle->recv_cb(handle, -1, buf, NULL, 0);
}
}
}
done:
/* Post another read if still reading and not closing. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_udp_queue_recv(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req) {
assert(handle->type == UV_UDP);
UNREGISTER_HANDLE_REQ(loop, handle, req);
if (req->cb) {
if (REQ_SUCCESS(req)) {
req->cb(req, 0);
} else {
uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
req->cb(req, -1);
}
}
DECREASE_PENDING_REQ_COUNT(handle);
}
int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr,
const char* interface_addr, uv_membership membership) {
int optname;
struct ip_mreq mreq;
/* If the socket is unbound, bind to inaddr_any. */
if (!(handle->flags & UV_HANDLE_BOUND) &&
uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) {
return -1;
}
if (handle->flags & UV_HANDLE_IPV6) {
uv__set_artificial_error(handle->loop, UV_ENOSYS);
return -1;
}
memset(&mreq, 0, sizeof mreq);
if (interface_addr) {
mreq.imr_interface.s_addr = inet_addr(interface_addr);
} else {
mreq.imr_interface.s_addr = htonl(INADDR_ANY);
}
mreq.imr_multiaddr.s_addr = inet_addr(multicast_addr);
switch (membership) {
case UV_JOIN_GROUP:
optname = IP_ADD_MEMBERSHIP;
break;
case UV_LEAVE_GROUP:
optname = IP_DROP_MEMBERSHIP;
break;
default:
return uv__set_artificial_error(handle->loop, UV_EINVAL);
}
if (setsockopt(handle->socket,
IPPROTO_IP,
optname,
(char*) &mreq,
sizeof mreq) == SOCKET_ERROR) {
uv__set_sys_error(handle->loop, WSAGetLastError());
return -1;
}
return 0;
}
int uv_udp_set_broadcast(uv_udp_t* handle, int value) {
BOOL optval = (BOOL) value;
/* If the socket is unbound, bind to inaddr_any. */
if (!(handle->flags & UV_HANDLE_BOUND) &&
uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) {
return -1;
}
if (setsockopt(handle->socket,
SOL_SOCKET,
SO_BROADCAST,
(char*) &optval,
sizeof optval)) {
uv__set_sys_error(handle->loop, WSAGetLastError());
return -1;
}
return 0;
}
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
WSAPROTOCOL_INFOW protocol_info;
int opt_len;
/* Detect the address family of the socket. */
opt_len = (int) sizeof protocol_info;
if (getsockopt(sock,
SOL_SOCKET,
SO_PROTOCOL_INFOW,
(char*) &protocol_info,
&opt_len) == SOCKET_ERROR) {
uv__set_sys_error(handle->loop, GetLastError());
return -1;
}
if (uv_udp_set_socket(handle->loop,
handle,
sock,
protocol_info.iAddressFamily) < 0) {
return -1;
}
return 0;
}
#define SOCKOPT_SETTER(name, option4, option6, validate) \
int uv_udp_set_##name(uv_udp_t* handle, int value) { \
DWORD optval = (DWORD) value; \
\
if (!(validate(value))) { \
uv__set_artificial_error(handle->loop, UV_EINVAL); \
return -1; \
} \
\
/* If the socket is unbound, bind to inaddr_any. */ \
if (!(handle->flags & UV_HANDLE_BOUND) && \
uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) { \
return -1; \
} \
\
if (!(handle->flags & UV_HANDLE_IPV6)) { \
/* Set IPv4 socket option */ \
if (setsockopt(handle->socket, \
IPPROTO_IP, \
option4, \
(char*) &optval, \
sizeof optval)) { \
uv__set_sys_error(handle->loop, WSAGetLastError()); \
return -1; \
} \
} else { \
/* Set IPv6 socket option */ \
if (setsockopt(handle->socket, \
IPPROTO_IPV6, \
option6, \
(char*) &optval, \
sizeof optval)) { \
uv__set_sys_error(handle->loop, WSAGetLastError()); \
return -1; \
} \
} \
return 0; \
}
#define VALIDATE_TTL(value) ((value) >= 1 && (value) <= 255)
#define VALIDATE_MULTICAST_TTL(value) ((value) >= -1 && (value) <= 255)
#define VALIDATE_MULTICAST_LOOP(value) (1)
SOCKOPT_SETTER(ttl,
IP_TTL,
IPV6_HOPLIMIT,
VALIDATE_TTL)
SOCKOPT_SETTER(multicast_ttl,
IP_MULTICAST_TTL,
IPV6_MULTICAST_HOPS,
VALIDATE_MULTICAST_TTL)
SOCKOPT_SETTER(multicast_loop,
IP_MULTICAST_LOOP,
IPV6_MULTICAST_LOOP,
VALIDATE_MULTICAST_LOOP)
#undef SOCKOPT_SETTER
#undef VALIDATE_TTL
#undef VALIDATE_MULTICAST_TTL
#undef VALIDATE_MULTICAST_LOOP

View File

@ -1,956 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <direct.h>
#include <limits.h>
#include <malloc.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <wchar.h>
#include "uv.h"
#include "internal.h"
#include <iphlpapi.h>
#include <psapi.h>
#include <tlhelp32.h>
/*
* Max title length; the only thing MSDN tells us about the maximum length
* of the console title is that it is smaller than 64K. However in practice
* it is much smaller, and there is no way to figure out what the exact length
* of the title is or can be, at least not on XP. To make it even more
* annoying, GetConsoleTitle fails when the buffer to be read into is bigger
* than the actual maximum length. So we make a conservative guess here;
* just don't put the novel you're writing in the title, unless the plot
* survives truncation.
*/
#define MAX_TITLE_LENGTH 8192
/* The number of nanoseconds in one second. */
#undef NANOSEC
#define NANOSEC 1000000000
/* Cached copy of the process title, plus a mutex guarding it. */
static char *process_title;
static CRITICAL_SECTION process_title_lock;
/* The tick frequency of the high-resolution clock. */
static uint64_t hrtime_frequency_ = 0;
/*
* One-time initialization code for functionality defined in util.c.
*/
void uv__util_init() {
/* Initialize process title access mutex. */
InitializeCriticalSection(&process_title_lock);
/* Retrieve high-resolution timer frequency. */
if (!QueryPerformanceFrequency((LARGE_INTEGER*) &hrtime_frequency_))
hrtime_frequency_ = 0;
}
int uv_utf16_to_utf8(const WCHAR* utf16Buffer, size_t utf16Size,
char* utf8Buffer, size_t utf8Size) {
return WideCharToMultiByte(CP_UTF8,
0,
utf16Buffer,
utf16Size,
utf8Buffer,
utf8Size,
NULL,
NULL);
}
int uv_utf8_to_utf16(const char* utf8Buffer, WCHAR* utf16Buffer,
size_t utf16Size) {
return MultiByteToWideChar(CP_UTF8,
0,
utf8Buffer,
-1,
utf16Buffer,
utf16Size);
}
int uv_exepath(char* buffer, size_t* size_ptr) {
int utf8_len, utf16_buffer_len, utf16_len;
WCHAR* utf16_buffer;
if (buffer == NULL || size_ptr == NULL || *size_ptr == 0) {
return -1;
}
if (*size_ptr > 32768) {
/* Windows paths can never be longer than this. */
utf16_buffer_len = 32768;
} else {
utf16_buffer_len = (int) *size_ptr;
}
utf16_buffer = (WCHAR*) malloc(sizeof(WCHAR) * utf16_buffer_len);
if (!utf16_buffer) {
return -1;
}
/* Get the path as UTF-16. */
utf16_len = GetModuleFileNameW(NULL, utf16_buffer, utf16_buffer_len);
if (utf16_len <= 0) {
goto error;
}
/* utf16_len contains the length, *not* including the terminating null. */
utf16_buffer[utf16_len] = L'\0';
/* Convert to UTF-8 */
utf8_len = WideCharToMultiByte(CP_UTF8,
0,
utf16_buffer,
-1,
buffer,
*size_ptr > INT_MAX ? INT_MAX : (int) *size_ptr,
NULL,
NULL);
if (utf8_len == 0) {
goto error;
}
free(utf16_buffer);
/* utf8_len *does* include the terminating null at this point, but the */
/* returned size shouldn't. */
*size_ptr = utf8_len - 1;
return 0;
error:
free(utf16_buffer);
return -1;
}
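/*
 * Editor's sketch (not part of the original file): calling uv_exepath(). The
 * size argument is in/out - pass the buffer capacity, and on success it is
 * set to the UTF-8 length excluding the terminating NUL.
 */
static void print_exepath(void) {
  char path[1024];
  size_t size = sizeof(path);
  if (uv_exepath(path, &size) == 0)
    printf("executable: %.*s\n", (int) size, path);
}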
uv_err_t uv_cwd(char* buffer, size_t size) {
DWORD utf16_len;
WCHAR utf16_buffer[MAX_PATH];
int r;
if (buffer == NULL || size == 0) {
return uv__new_artificial_error(UV_EINVAL);
}
utf16_len = GetCurrentDirectoryW(MAX_PATH, utf16_buffer);
if (utf16_len == 0) {
return uv__new_sys_error(GetLastError());
} else if (utf16_len > MAX_PATH) {
/* This should be impossible; however the CRT has a code path to deal */
/* with this scenario, so I added a check anyway. */
return uv__new_artificial_error(UV_EIO);
}
/* utf16_len contains the length, *not* including the terminating null. */
utf16_buffer[utf16_len] = L'\0';
/* The returned directory should not have a trailing slash, unless it */
/* points at a drive root, like c:\. Remove it if needed.*/
if (utf16_buffer[utf16_len - 1] == L'\\' &&
!(utf16_len == 3 && utf16_buffer[1] == L':')) {
utf16_len--;
utf16_buffer[utf16_len] = L'\0';
}
/* Convert to UTF-8 */
r = WideCharToMultiByte(CP_UTF8,
0,
utf16_buffer,
-1,
buffer,
size > INT_MAX ? INT_MAX : (int) size,
NULL,
NULL);
if (r == 0) {
return uv__new_sys_error(GetLastError());
}
return uv_ok_;
}
uv_err_t uv_chdir(const char* dir) {
WCHAR utf16_buffer[MAX_PATH];
size_t utf16_len;
WCHAR drive_letter, env_var[4];
if (dir == NULL) {
return uv__new_artificial_error(UV_EINVAL);
}
if (MultiByteToWideChar(CP_UTF8,
0,
dir,
-1,
utf16_buffer,
MAX_PATH) == 0) {
DWORD error = GetLastError();
/* The maximum length of the current working directory is 260 chars, */
/* including terminating null. If it doesn't fit, the path name must be */
/* too long. */
if (error == ERROR_INSUFFICIENT_BUFFER) {
return uv__new_artificial_error(UV_ENAMETOOLONG);
} else {
return uv__new_sys_error(error);
}
}
if (!SetCurrentDirectoryW(utf16_buffer)) {
return uv__new_sys_error(GetLastError());
}
/* Windows stores the drive-local path in a "hidden" environment variable, */
/* which has the form "=C:=C:\Windows". SetCurrentDirectory does not */
/* update this, so we'll have to do it. */
utf16_len = GetCurrentDirectoryW(MAX_PATH, utf16_buffer);
if (utf16_len == 0) {
return uv__new_sys_error(GetLastError());
} else if (utf16_len > MAX_PATH) {
return uv__new_artificial_error(UV_EIO);
}
/* The returned directory should not have a trailing slash, unless it */
/* points at a drive root, like c:\. Remove it if needed. */
if (utf16_buffer[utf16_len - 1] == L'\\' &&
!(utf16_len == 3 && utf16_buffer[1] == L':')) {
utf16_len--;
utf16_buffer[utf16_len] = L'\0';
}
if (utf16_len < 2 || utf16_buffer[1] != L':') {
/* Doesn't look like a drive letter could be there - probably a UNC */
/* path. TODO: Need to handle win32 namespaces like \\?\C:\ ? */
drive_letter = 0;
} else if (utf16_buffer[0] >= L'A' && utf16_buffer[0] <= L'Z') {
drive_letter = utf16_buffer[0];
} else if (utf16_buffer[0] >= L'a' && utf16_buffer[0] <= L'z') {
/* Convert to uppercase. */
drive_letter = utf16_buffer[0] - L'a' + L'A';
} else {
/* Not valid. */
drive_letter = 0;
}
if (drive_letter != 0) {
/* Construct the environment variable name and set it. */
env_var[0] = L'=';
env_var[1] = drive_letter;
env_var[2] = L':';
env_var[3] = L'\0';
if (!SetEnvironmentVariableW(env_var, utf16_buffer)) {
return uv__new_sys_error(GetLastError());
}
}
return uv_ok_;
}
void uv_loadavg(double avg[3]) {
/* Can't be implemented */
avg[0] = avg[1] = avg[2] = 0;
}
uint64_t uv_get_free_memory(void) {
MEMORYSTATUSEX memory_status;
memory_status.dwLength = sizeof(memory_status);
if(!GlobalMemoryStatusEx(&memory_status))
{
return -1;
}
return (uint64_t)memory_status.ullAvailPhys;
}
uint64_t uv_get_total_memory(void) {
MEMORYSTATUSEX memory_status;
memory_status.dwLength = sizeof(memory_status);
if(!GlobalMemoryStatusEx(&memory_status))
{
return -1;
}
return (uint64_t)memory_status.ullTotalPhys;
}
int uv_parent_pid() {
int parent_pid = -1;
HANDLE handle;
PROCESSENTRY32 pe;
int current_pid = GetCurrentProcessId();
pe.dwSize = sizeof(PROCESSENTRY32);
handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
if (Process32First(handle, &pe)) {
do {
if (pe.th32ProcessID == current_pid) {
parent_pid = pe.th32ParentProcessID;
break;
}
} while( Process32Next(handle, &pe));
}
CloseHandle(handle);
return parent_pid;
}
char** uv_setup_args(int argc, char** argv) {
return argv;
}
uv_err_t uv_set_process_title(const char* title) {
uv_err_t err;
int length;
WCHAR* title_w = NULL;
uv__once_init();
/* Find out how big the buffer for the wide-char title must be */
length = uv_utf8_to_utf16(title, NULL, 0);
if (!length) {
err = uv__new_sys_error(GetLastError());
goto done;
}
/* Convert to wide-char string */
title_w = (WCHAR*)malloc(sizeof(WCHAR) * length);
if (!title_w) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
length = uv_utf8_to_utf16(title, title_w, length);
if (!length) {
err = uv__new_sys_error(GetLastError());
goto done;
};
/* If the title must be truncated, insert a \0 terminator there */
if (length > MAX_TITLE_LENGTH) {
title_w[MAX_TITLE_LENGTH - 1] = L'\0';
}
if (!SetConsoleTitleW(title_w)) {
err = uv__new_sys_error(GetLastError());
goto done;
}
EnterCriticalSection(&process_title_lock);
free(process_title);
process_title = strdup(title);
LeaveCriticalSection(&process_title_lock);
err = uv_ok_;
done:
free(title_w);
return err;
}
static int uv__get_process_title() {
WCHAR title_w[MAX_TITLE_LENGTH];
int length;
if (!GetConsoleTitleW(title_w, sizeof(title_w) / sizeof(WCHAR))) {
return -1;
}
/* Find out how large a buffer we need for the UTF-8 title */
length = uv_utf16_to_utf8(title_w, -1, NULL, 0);
if (!length) {
return -1;
}
assert(!process_title);
process_title = (char*)malloc(length);
if (!process_title) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
/* Do utf16 -> utf8 conversion here */
if (!uv_utf16_to_utf8(title_w, -1, process_title, length)) {
free(process_title);
return -1;
}
return 0;
}
uv_err_t uv_get_process_title(char* buffer, size_t size) {
uv__once_init();
EnterCriticalSection(&process_title_lock);
/*
* If the process_title was never read before nor explicitly set,
* we must query it with getConsoleTitleW
*/
if (!process_title && uv__get_process_title() == -1) {
/* Don't hold the title lock across the error return. */
LeaveCriticalSection(&process_title_lock);
return uv__new_sys_error(GetLastError());
}
assert(process_title);
strncpy(buffer, process_title, size);
LeaveCriticalSection(&process_title_lock);
return uv_ok_;
}
uint64_t uv_hrtime(void) {
LARGE_INTEGER counter;
uv__once_init();
/* If the performance frequency is zero, there's no support. */
if (!hrtime_frequency_) {
/* uv__set_sys_error(loop, ERROR_NOT_SUPPORTED); */
return 0;
}
if (!QueryPerformanceCounter(&counter)) {
/* uv__set_sys_error(loop, GetLastError()); */
return 0;
}
/* Because we have no guarantee about the order of magnitude of the */
/* performance counter frequency, and there may not be much headroom to */
/* multiply by NANOSEC without overflowing, we use 128-bit math instead. */
return ((uint64_t) counter.LowPart * NANOSEC / hrtime_frequency_) +
(((uint64_t) counter.HighPart * NANOSEC / hrtime_frequency_)
<< 32);
}
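/*
 * Editor's sketch (not part of the original file): uv_hrtime() reports
 * nanoseconds from an arbitrary origin, so only differences between two
 * samples are meaningful.
 */
static double measure_seconds(void (*fn)(void)) {
  uint64_t start, end;
  start = uv_hrtime();
  fn();                                  /* work being measured */
  end = uv_hrtime();
  return (double) (end - start) / NANOSEC;
}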
uv_err_t uv_resident_set_memory(size_t* rss) {
HANDLE current_process;
PROCESS_MEMORY_COUNTERS pmc;
current_process = GetCurrentProcess();
if (!GetProcessMemoryInfo(current_process, &pmc, sizeof(pmc))) {
return uv__new_sys_error(GetLastError());
}
*rss = pmc.WorkingSetSize;
return uv_ok_;
}
uv_err_t uv_uptime(double* uptime) {
BYTE stack_buffer[4096];
BYTE* malloced_buffer = NULL;
BYTE* buffer = (BYTE*) stack_buffer;
size_t buffer_size = sizeof(stack_buffer);
DWORD data_size;
PERF_DATA_BLOCK* data_block;
PERF_OBJECT_TYPE* object_type;
PERF_COUNTER_DEFINITION* counter_definition;
DWORD i;
for (;;) {
LONG result;
data_size = (DWORD) buffer_size;
result = RegQueryValueExW(HKEY_PERFORMANCE_DATA,
L"2",
NULL,
NULL,
buffer,
&data_size);
if (result == ERROR_SUCCESS) {
break;
} else if (result != ERROR_MORE_DATA) {
*uptime = 0;
return uv__new_sys_error(result);
}
free(malloced_buffer);
buffer_size *= 2;
/* Don't let the buffer grow infinitely. */
if (buffer_size > 1 << 20) {
goto internalError;
}
buffer = malloced_buffer = (BYTE*) malloc(buffer_size);
if (malloced_buffer == NULL) {
*uptime = 0;
return uv__new_artificial_error(UV_ENOMEM);
}
}
if (data_size < sizeof(*data_block))
goto internalError;
data_block = (PERF_DATA_BLOCK*) buffer;
if (wmemcmp(data_block->Signature, L"PERF", 4) != 0)
goto internalError;
if (data_size < data_block->HeaderLength + sizeof(*object_type))
goto internalError;
object_type = (PERF_OBJECT_TYPE*) (buffer + data_block->HeaderLength);
if (object_type->NumInstances != PERF_NO_INSTANCES)
goto internalError;
counter_definition = (PERF_COUNTER_DEFINITION*) (buffer +
data_block->HeaderLength + object_type->HeaderLength);
for (i = 0; i < object_type->NumCounters; i++) {
if ((BYTE*) counter_definition + sizeof(*counter_definition) >
buffer + data_size) {
break;
}
if (counter_definition->CounterNameTitleIndex == 674 &&
counter_definition->CounterSize == sizeof(uint64_t)) {
if (counter_definition->CounterOffset + sizeof(uint64_t) > data_size ||
!(counter_definition->CounterType & PERF_OBJECT_TIMER)) {
goto internalError;
} else {
BYTE* address = (BYTE*) object_type + object_type->DefinitionLength +
counter_definition->CounterOffset;
uint64_t value = *((uint64_t*) address);
*uptime = (double) (object_type->PerfTime.QuadPart - value) /
(double) object_type->PerfFreq.QuadPart;
free(malloced_buffer);
return uv_ok_;
}
}
counter_definition = (PERF_COUNTER_DEFINITION*)
((BYTE*) counter_definition + counter_definition->ByteLength);
}
/* If we get here, the uptime value was not found. */
free(malloced_buffer);
*uptime = 0;
return uv__new_artificial_error(UV_ENOSYS);
internalError:
free(malloced_buffer);
*uptime = 0;
return uv__new_artificial_error(UV_EIO);
}
uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos_ptr, int* cpu_count_ptr) {
uv_cpu_info_t* cpu_infos;
SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION* sppi;
DWORD sppi_size;
SYSTEM_INFO system_info;
DWORD cpu_count, r, i;
NTSTATUS status;
ULONG result_size;
uv_err_t err;
uv_cpu_info_t* cpu_info;
cpu_infos = NULL;
cpu_count = 0;
sppi = NULL;
uv__once_init();
GetSystemInfo(&system_info);
cpu_count = system_info.dwNumberOfProcessors;
cpu_infos = calloc(cpu_count, sizeof *cpu_infos);
if (cpu_infos == NULL) {
err = uv__new_artificial_error(UV_ENOMEM);
goto error;
}
sppi_size = cpu_count * sizeof(*sppi);
sppi = malloc(sppi_size);
if (sppi == NULL) {
err = uv__new_artificial_error(UV_ENOMEM);
goto error;
}
status = pNtQuerySystemInformation(SystemProcessorPerformanceInformation,
sppi,
sppi_size,
&result_size);
if (!NT_SUCCESS(status)) {
err = uv__new_sys_error(pRtlNtStatusToDosError(status));
goto error;
}
assert(result_size == sppi_size);
for (i = 0; i < cpu_count; i++) {
WCHAR key_name[128];
HKEY processor_key;
DWORD cpu_speed;
DWORD cpu_speed_size = sizeof(cpu_speed);
WCHAR cpu_brand[256];
DWORD cpu_brand_size = sizeof(cpu_brand);
int len;
len = _snwprintf(key_name,
ARRAY_SIZE(key_name),
L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\%d",
i);
assert(len > 0 && len < ARRAY_SIZE(key_name));
r = RegOpenKeyExW(HKEY_LOCAL_MACHINE,
key_name,
0,
KEY_QUERY_VALUE,
&processor_key);
if (r != ERROR_SUCCESS) {
err = uv__new_sys_error(GetLastError());
goto error;
}
if (RegQueryValueExW(processor_key,
L"~MHz",
NULL,
NULL,
(BYTE*) &cpu_speed,
&cpu_speed_size) != ERROR_SUCCESS) {
err = uv__new_sys_error(GetLastError());
RegCloseKey(processor_key);
goto error;
}
if (RegQueryValueExW(processor_key,
L"ProcessorNameString",
NULL,
NULL,
(BYTE*) &cpu_brand,
&cpu_brand_size) != ERROR_SUCCESS) {
err = uv__new_sys_error(GetLastError());
RegCloseKey(processor_key);
goto error;
}
RegCloseKey(processor_key);
cpu_info = &cpu_infos[i];
cpu_info->speed = cpu_speed;
cpu_info->cpu_times.user = sppi[i].UserTime.QuadPart / 10000;
cpu_info->cpu_times.sys = (sppi[i].KernelTime.QuadPart -
sppi[i].IdleTime.QuadPart) / 10000;
cpu_info->cpu_times.idle = sppi[i].IdleTime.QuadPart / 10000;
cpu_info->cpu_times.irq = sppi[i].InterruptTime.QuadPart / 10000;
cpu_info->cpu_times.nice = 0;
len = WideCharToMultiByte(CP_UTF8,
0,
cpu_brand,
cpu_brand_size / sizeof(WCHAR),
NULL,
0,
NULL,
NULL);
if (len == 0) {
err = uv__new_sys_error(GetLastError());
goto error;
}
assert(len > 0);
/* Allocate 1 extra byte for the null terminator. */
cpu_info->model = malloc(len + 1);
if (cpu_info->model == NULL) {
err = uv__new_artificial_error(UV_ENOMEM);
goto error;
}
if (WideCharToMultiByte(CP_UTF8,
0,
cpu_brand,
cpu_brand_size / sizeof(WCHAR),
cpu_info->model,
len,
NULL,
NULL) == 0) {
err = uv__new_sys_error(GetLastError());
goto error;
}
/* Ensure that cpu_info->model is null terminated. */
cpu_info->model[len] = '\0';
}
free(sppi);
*cpu_count_ptr = cpu_count;
*cpu_infos_ptr = cpu_infos;
return uv_ok_;
error:
/* This is safe because the cpu_infos array is zeroed on allocation. */
for (i = 0; i < cpu_count; i++)
free(cpu_infos[i].model);
free(cpu_infos);
free(sppi);
return err;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
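/*
 * Editor's sketch (not part of the original file): enumerating CPUs with the
 * uv_err_t-returning API used in this tree and releasing the array with
 * uv_free_cpu_info(). The err.code/UV_OK comparison follows the uv_err_t
 * layout assumed by the rest of this tree.
 */
static void print_cpus(void) {
  uv_cpu_info_t* cpus;
  int count, i;
  uv_err_t err = uv_cpu_info(&cpus, &count);
  if (err.code != UV_OK)
    return;
  for (i = 0; i < count; i++)
    printf("%s @ %d MHz\n", cpus[i].model, cpus[i].speed);
  uv_free_cpu_info(cpus, count);
}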
uv_err_t uv_interface_addresses(uv_interface_address_t** addresses_ptr,
int* count_ptr) {
IP_ADAPTER_ADDRESSES* win_address_buf;
ULONG win_address_buf_size;
IP_ADAPTER_ADDRESSES* win_address;
uv_interface_address_t* uv_address_buf;
char* name_buf;
size_t uv_address_buf_size;
uv_interface_address_t* uv_address;
int count;
/* Fetch the size of the adapters reported by windows, and then get the */
/* list itself. */
win_address_buf_size = 0;
win_address_buf = NULL;
for (;;) {
ULONG r;
/* If win_address_buf is 0, then GetAdaptersAddresses will fail with */
/* ERROR_BUFFER_OVERFLOW, and the required buffer size will be stored in */
/* win_address_buf_size. */
r = GetAdaptersAddresses(AF_UNSPEC,
0,
NULL,
win_address_buf,
&win_address_buf_size);
if (r == ERROR_SUCCESS)
break;
free(win_address_buf);
switch (r) {
case ERROR_BUFFER_OVERFLOW:
/* This happens when win_address_buf is NULL or too small to hold */
/* all adapters. */
win_address_buf = malloc(win_address_buf_size);
if (win_address_buf == NULL)
return uv__new_artificial_error(UV_ENOMEM);
continue;
case ERROR_NO_DATA: {
/* No adapters were found. */
uv_address_buf = malloc(1);
if (uv_address_buf == NULL)
return uv__new_artificial_error(UV_ENOMEM);
*count_ptr = 0;
*addresses_ptr = uv_address_buf;
return uv_ok_;
}
case ERROR_ADDRESS_NOT_ASSOCIATED:
return uv__new_artificial_error(UV_EAGAIN);
case ERROR_INVALID_PARAMETER:
/* MSDN says:
* "This error is returned for any of the following conditions: the
* SizePointer parameter is NULL, the Address parameter is not
* AF_INET, AF_INET6, or AF_UNSPEC, or the address information for
* the parameters requested is greater than ULONG_MAX."
* Since the first two conditions are not met, it must be that the
* adapter data is too big.
*/
return uv__new_artificial_error(UV_ENOBUFS);
default:
/* Other (unspecified) errors can happen, but we don't have any */
/* special meaning for them. */
assert(r != ERROR_SUCCESS);
return uv__new_sys_error(r);
}
}
/* Count the number of enabled interfaces and compute how much space is */
/* needed to store their info. */
count = 0;
uv_address_buf_size = 0;
for (win_address = win_address_buf;
win_address != NULL;
win_address = win_address->Next) {
/* Use IP_ADAPTER_UNICAST_ADDRESS_XP to retain backwards compatibility */
/* with Windows XP */
IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address;
int name_size;
/* Interfaces that are not 'up' should not be reported. Also skip */
/* interfaces that have no associated unicast address, so as to avoid */
/* allocating space for the name for this interface. */
if (win_address->OperStatus != IfOperStatusUp ||
win_address->FirstUnicastAddress == NULL)
continue;
/* Compute the size of the interface name. */
name_size = WideCharToMultiByte(CP_UTF8,
0,
win_address->FriendlyName,
-1,
NULL,
0,
NULL,
FALSE);
if (name_size <= 0) {
free(win_address_buf);
return uv__new_sys_error(GetLastError());
}
uv_address_buf_size += name_size;
/* Count the number of addresses associated with this interface, and */
/* compute the size. */
for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*)
win_address->FirstUnicastAddress;
unicast_address != NULL;
unicast_address = unicast_address->Next) {
count++;
uv_address_buf_size += sizeof(uv_interface_address_t);
}
}
/* Allocate space to store interface data plus adapter names. */
uv_address_buf = malloc(uv_address_buf_size);
if (uv_address_buf == NULL) {
free(win_address_buf);
return uv__new_artificial_error(UV_ENOMEM);
}
/* Compute the start of the uv_interface_address_t array, and the place in */
/* the buffer where the interface names will be stored. */
uv_address = uv_address_buf;
name_buf = (char*) (uv_address_buf + count);
/* Fill out the output buffer. */
for (win_address = win_address_buf;
win_address != NULL;
win_address = win_address->Next) {
IP_ADAPTER_UNICAST_ADDRESS_XP* unicast_address;
int name_size;
size_t max_name_size;
if (win_address->OperStatus != IfOperStatusUp ||
win_address->FirstUnicastAddress == NULL)
continue;
/* Convert the interface name to UTF8. */
max_name_size = (char*) uv_address_buf + uv_address_buf_size - name_buf;
if (max_name_size > (size_t) INT_MAX)
max_name_size = INT_MAX;
name_size = WideCharToMultiByte(CP_UTF8,
0,
win_address->FriendlyName,
-1,
name_buf,
(int) max_name_size,
NULL,
FALSE);
if (name_size <= 0) {
free(win_address_buf);
free(uv_address_buf);
return uv__new_sys_error(GetLastError());
}
    /* Add a uv_interface_address_t element for every unicast address. */
for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS_XP*)
win_address->FirstUnicastAddress;
unicast_address != NULL;
unicast_address = unicast_address->Next) {
struct sockaddr* sa;
uv_address->name = name_buf;
sa = unicast_address->Address.lpSockaddr;
if (sa->sa_family == AF_INET6)
uv_address->address.address6 = *((struct sockaddr_in6 *) sa);
else
uv_address->address.address4 = *((struct sockaddr_in *) sa);
uv_address->is_internal =
(win_address->IfType == IF_TYPE_SOFTWARE_LOOPBACK);
uv_address++;
}
name_buf += name_size;
}
free(win_address_buf);
*addresses_ptr = uv_address_buf;
*count_ptr = count;
return uv_ok_;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
free(addresses);
}
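/* Illustrative usage sketch (not part of the original file), assuming the
 * 0.x-era API in which uv_interface_addresses() returns a uv_err_t. A single
 * allocation holds the uv_interface_address_t array followed by the UTF-8
 * names, so uv_free_interface_addresses() releases everything with one free().
 * Compiled as a standalone program: */
#include <stdio.h>
#include "uv.h"

int main(void) {
  uv_interface_address_t* addresses;
  uv_err_t err;
  int count;
  int i;

  err = uv_interface_addresses(&addresses, &count);
  if (err.code != UV_OK) {
    fprintf(stderr, "uv_interface_addresses: %s\n", uv_strerror(err));
    return 1;
  }

  for (i = 0; i < count; i++) {
    /* 'name' points into the same buffer as the array itself. */
    printf("%s: %s, %s\n",
           addresses[i].name,
           addresses[i].is_internal ? "internal" : "external",
           addresses[i].address.address4.sin_family == AF_INET ? "IPv4" : "IPv6");
  }

  uv_free_interface_addresses(addresses, count);
  return 0;
}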


@ -1,152 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
/* Ntdll function pointers */
sRtlNtStatusToDosError pRtlNtStatusToDosError;
sNtDeviceIoControlFile pNtDeviceIoControlFile;
sNtQueryInformationFile pNtQueryInformationFile;
sNtSetInformationFile pNtSetInformationFile;
sNtQuerySystemInformation pNtQuerySystemInformation;
/* Kernel32 function pointers */
sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
sSetFileCompletionNotificationModes pSetFileCompletionNotificationModes;
sCreateSymbolicLinkW pCreateSymbolicLinkW;
sCancelIoEx pCancelIoEx;
sInitializeSRWLock pInitializeSRWLock;
sAcquireSRWLockShared pAcquireSRWLockShared;
sAcquireSRWLockExclusive pAcquireSRWLockExclusive;
sTryAcquireSRWLockShared pTryAcquireSRWLockShared;
sTryAcquireSRWLockExclusive pTryAcquireSRWLockExclusive;
sReleaseSRWLockShared pReleaseSRWLockShared;
sReleaseSRWLockExclusive pReleaseSRWLockExclusive;
sInitializeConditionVariable pInitializeConditionVariable;
sSleepConditionVariableCS pSleepConditionVariableCS;
sSleepConditionVariableSRW pSleepConditionVariableSRW;
sWakeAllConditionVariable pWakeAllConditionVariable;
sWakeConditionVariable pWakeConditionVariable;
void uv_winapi_init() {
HMODULE ntdll_module;
HMODULE kernel32_module;
ntdll_module = GetModuleHandleA("ntdll.dll");
if (ntdll_module == NULL) {
uv_fatal_error(GetLastError(), "GetModuleHandleA");
}
pRtlNtStatusToDosError = (sRtlNtStatusToDosError) GetProcAddress(
ntdll_module,
"RtlNtStatusToDosError");
if (pRtlNtStatusToDosError == NULL) {
uv_fatal_error(GetLastError(), "GetProcAddress");
}
pNtQueryInformationFile = (sNtQueryInformationFile) GetProcAddress(
ntdll_module,
"NtQueryInformationFile");
if (pNtQueryInformationFile == NULL) {
uv_fatal_error(GetLastError(), "GetProcAddress");
}
pNtDeviceIoControlFile = (sNtDeviceIoControlFile) GetProcAddress(
ntdll_module,
"NtDeviceIoControlFile");
if (pNtDeviceIoControlFile == NULL) {
uv_fatal_error(GetLastError(), "GetProcAddress");
}
pNtSetInformationFile = (sNtSetInformationFile) GetProcAddress(
ntdll_module,
"NtSetInformationFile");
if (pNtSetInformationFile == NULL) {
uv_fatal_error(GetLastError(), "GetProcAddress");
}
pNtQuerySystemInformation = (sNtQuerySystemInformation) GetProcAddress(
ntdll_module,
"NtQuerySystemInformation");
if (pNtQuerySystemInformation == NULL) {
uv_fatal_error(GetLastError(), "GetProcAddress");
}
kernel32_module = GetModuleHandleA("kernel32.dll");
if (kernel32_module == NULL) {
uv_fatal_error(GetLastError(), "GetModuleHandleA");
}
pGetQueuedCompletionStatusEx = (sGetQueuedCompletionStatusEx) GetProcAddress(
kernel32_module,
"GetQueuedCompletionStatusEx");
pSetFileCompletionNotificationModes = (sSetFileCompletionNotificationModes)
GetProcAddress(kernel32_module, "SetFileCompletionNotificationModes");
pCreateSymbolicLinkW = (sCreateSymbolicLinkW)
GetProcAddress(kernel32_module, "CreateSymbolicLinkW");
pCancelIoEx = (sCancelIoEx)
GetProcAddress(kernel32_module, "CancelIoEx");
pInitializeSRWLock = (sInitializeSRWLock)
GetProcAddress(kernel32_module, "InitializeSRWLock");
pAcquireSRWLockShared = (sAcquireSRWLockShared)
GetProcAddress(kernel32_module, "AcquireSRWLockShared");
pAcquireSRWLockExclusive = (sAcquireSRWLockExclusive)
GetProcAddress(kernel32_module, "AcquireSRWLockExclusive");
pTryAcquireSRWLockShared = (sTryAcquireSRWLockShared)
GetProcAddress(kernel32_module, "TryAcquireSRWLockShared");
pTryAcquireSRWLockExclusive = (sTryAcquireSRWLockExclusive)
GetProcAddress(kernel32_module, "TryAcquireSRWLockExclusive");
pReleaseSRWLockShared = (sReleaseSRWLockShared)
GetProcAddress(kernel32_module, "ReleaseSRWLockShared");
pReleaseSRWLockExclusive = (sReleaseSRWLockExclusive)
GetProcAddress(kernel32_module, "ReleaseSRWLockExclusive");
pInitializeConditionVariable = (sInitializeConditionVariable)
GetProcAddress(kernel32_module, "InitializeConditionVariable");
pSleepConditionVariableCS = (sSleepConditionVariableCS)
GetProcAddress(kernel32_module, "SleepConditionVariableCS");
pSleepConditionVariableSRW = (sSleepConditionVariableSRW)
GetProcAddress(kernel32_module, "SleepConditionVariableSRW");
pWakeAllConditionVariable = (sWakeAllConditionVariable)
GetProcAddress(kernel32_module, "WakeAllConditionVariable");
pWakeConditionVariable = (sWakeConditionVariable)
GetProcAddress(kernel32_module, "WakeConditionVariable");
}
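/* Illustrative sketch (not part of the original file): every pointer resolved
 * above may still be NULL on older Windows releases, so call sites are
 * expected to test the pointer and fall back to a pre-Vista API. The 'handle'
 * parameter is hypothetical. */
static void cancel_pending_io(HANDLE handle) {
  if (pCancelIoEx != NULL) {
    /* Vista+: cancels pending I/O issued by any thread on this handle. */
    pCancelIoEx(handle, NULL);
  } else {
    /* Fallback: CancelIo only cancels I/O issued by the calling thread. */
    CancelIo(handle);
  }
}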

File diff suppressed because it is too large.


@ -1,554 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
/* Whether there are any non-IFS LSPs stacked on TCP */
int uv_tcp_non_ifs_lsp_ipv4;
int uv_tcp_non_ifs_lsp_ipv6;
/* Ip address used to bind to any port at any interface */
struct sockaddr_in uv_addr_ip4_any_;
struct sockaddr_in6 uv_addr_ip6_any_;
/*
* Retrieves the pointer to a winsock extension function.
*/
static BOOL uv_get_extension_function(SOCKET socket, GUID guid,
void **target) {
DWORD result, bytes;
result = WSAIoctl(socket,
SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid,
sizeof(guid),
(void*)target,
sizeof(*target),
&bytes,
NULL,
NULL);
if (result == SOCKET_ERROR) {
*target = NULL;
return FALSE;
} else {
return TRUE;
}
}
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) {
const GUID wsaid_acceptex = WSAID_ACCEPTEX;
return uv_get_extension_function(socket, wsaid_acceptex, (void**)target);
}
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) {
const GUID wsaid_connectex = WSAID_CONNECTEX;
return uv_get_extension_function(socket, wsaid_connectex, (void**)target);
}
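/* Illustrative sketch (not part of the original file): the getters above are
 * called once per socket to resolve the extension function before it is used;
 * on failure the error is available through WSAGetLastError(). The
 * 'listen_socket' parameter is hypothetical. */
static LPFN_ACCEPTEX resolve_acceptex(SOCKET listen_socket) {
  LPFN_ACCEPTEX acceptex = NULL;
  if (!uv_get_acceptex_function(listen_socket, &acceptex)) {
    /* AcceptEx could not be retrieved, e.g. a non-IFS LSP is in the way. */
    return NULL;
  }
  return acceptex;
}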
static int error_means_no_support(DWORD error) {
return error == WSAEPROTONOSUPPORT || error == WSAESOCKTNOSUPPORT ||
error == WSAEPFNOSUPPORT || error == WSAEAFNOSUPPORT;
}
void uv_winsock_init() {
WSADATA wsa_data;
int errorno;
SOCKET dummy;
WSAPROTOCOL_INFOW protocol_info;
int opt_len;
/* Initialize winsock */
errorno = WSAStartup(MAKEWORD(2, 2), &wsa_data);
if (errorno != 0) {
uv_fatal_error(errorno, "WSAStartup");
}
/* Set implicit binding address used by connectEx */
uv_addr_ip4_any_ = uv_ip4_addr("0.0.0.0", 0);
uv_addr_ip6_any_ = uv_ip6_addr("::", 0);
/* Detect non-IFS LSPs */
dummy = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
if (dummy != INVALID_SOCKET) {
opt_len = (int) sizeof protocol_info;
    if (getsockopt(dummy,
SOL_SOCKET,
SO_PROTOCOL_INFOW,
(char*) &protocol_info,
&opt_len) == SOCKET_ERROR)
uv_fatal_error(WSAGetLastError(), "getsockopt");
if (!(protocol_info.dwServiceFlags1 & XP1_IFS_HANDLES))
uv_tcp_non_ifs_lsp_ipv4 = 1;
if (closesocket(dummy) == SOCKET_ERROR)
uv_fatal_error(WSAGetLastError(), "closesocket");
} else if (!error_means_no_support(WSAGetLastError())) {
/* Any error other than "socket type not supported" is fatal. */
uv_fatal_error(WSAGetLastError(), "socket");
}
/* Detect IPV6 support and non-IFS LSPs */
dummy = socket(AF_INET6, SOCK_STREAM, IPPROTO_IP);
if (dummy != INVALID_SOCKET) {
opt_len = (int) sizeof protocol_info;
    if (getsockopt(dummy,
SOL_SOCKET,
SO_PROTOCOL_INFOW,
(char*) &protocol_info,
&opt_len) == SOCKET_ERROR)
uv_fatal_error(WSAGetLastError(), "getsockopt");
if (!(protocol_info.dwServiceFlags1 & XP1_IFS_HANDLES))
uv_tcp_non_ifs_lsp_ipv6 = 1;
if (closesocket(dummy) == SOCKET_ERROR)
uv_fatal_error(WSAGetLastError(), "closesocket");
} else if (!error_means_no_support(WSAGetLastError())) {
/* Any error other than "socket type not supported" is fatal. */
uv_fatal_error(WSAGetLastError(), "socket");
}
}
int uv_ntstatus_to_winsock_error(NTSTATUS status) {
switch (status) {
case STATUS_SUCCESS:
return ERROR_SUCCESS;
case STATUS_PENDING:
return ERROR_IO_PENDING;
case STATUS_INVALID_HANDLE:
case STATUS_OBJECT_TYPE_MISMATCH:
return WSAENOTSOCK;
case STATUS_INSUFFICIENT_RESOURCES:
case STATUS_PAGEFILE_QUOTA:
case STATUS_COMMITMENT_LIMIT:
case STATUS_WORKING_SET_QUOTA:
case STATUS_NO_MEMORY:
case STATUS_CONFLICTING_ADDRESSES:
case STATUS_QUOTA_EXCEEDED:
case STATUS_TOO_MANY_PAGING_FILES:
case STATUS_REMOTE_RESOURCES:
case STATUS_TOO_MANY_ADDRESSES:
return WSAENOBUFS;
case STATUS_SHARING_VIOLATION:
case STATUS_ADDRESS_ALREADY_EXISTS:
return WSAEADDRINUSE;
case STATUS_LINK_TIMEOUT:
case STATUS_IO_TIMEOUT:
case STATUS_TIMEOUT:
return WSAETIMEDOUT;
case STATUS_GRACEFUL_DISCONNECT:
return WSAEDISCON;
case STATUS_REMOTE_DISCONNECT:
case STATUS_CONNECTION_RESET:
case STATUS_LINK_FAILED:
case STATUS_CONNECTION_DISCONNECTED:
case STATUS_PORT_UNREACHABLE:
case STATUS_HOPLIMIT_EXCEEDED:
return WSAECONNRESET;
case STATUS_LOCAL_DISCONNECT:
case STATUS_TRANSACTION_ABORTED:
case STATUS_CONNECTION_ABORTED:
return WSAECONNABORTED;
case STATUS_BAD_NETWORK_PATH:
case STATUS_NETWORK_UNREACHABLE:
case STATUS_PROTOCOL_UNREACHABLE:
return WSAENETUNREACH;
case STATUS_HOST_UNREACHABLE:
return WSAEHOSTUNREACH;
case STATUS_CANCELLED:
case STATUS_REQUEST_ABORTED:
return WSAEINTR;
case STATUS_BUFFER_OVERFLOW:
case STATUS_INVALID_BUFFER_SIZE:
return WSAEMSGSIZE;
case STATUS_BUFFER_TOO_SMALL:
case STATUS_ACCESS_VIOLATION:
return WSAEFAULT;
case STATUS_DEVICE_NOT_READY:
case STATUS_REQUEST_NOT_ACCEPTED:
return WSAEWOULDBLOCK;
case STATUS_INVALID_NETWORK_RESPONSE:
case STATUS_NETWORK_BUSY:
case STATUS_NO_SUCH_DEVICE:
case STATUS_NO_SUCH_FILE:
case STATUS_OBJECT_PATH_NOT_FOUND:
case STATUS_OBJECT_NAME_NOT_FOUND:
case STATUS_UNEXPECTED_NETWORK_ERROR:
return WSAENETDOWN;
case STATUS_INVALID_CONNECTION:
return WSAENOTCONN;
case STATUS_REMOTE_NOT_LISTENING:
case STATUS_CONNECTION_REFUSED:
return WSAECONNREFUSED;
case STATUS_PIPE_DISCONNECTED:
return WSAESHUTDOWN;
case STATUS_INVALID_ADDRESS:
case STATUS_INVALID_ADDRESS_COMPONENT:
return WSAEADDRNOTAVAIL;
case STATUS_NOT_SUPPORTED:
case STATUS_NOT_IMPLEMENTED:
return WSAEOPNOTSUPP;
case STATUS_ACCESS_DENIED:
return WSAEACCES;
default:
if ((status & (FACILITY_NTWIN32 << 16)) == (FACILITY_NTWIN32 << 16) &&
(status & (ERROR_SEVERITY_ERROR | ERROR_SEVERITY_WARNING))) {
      /* It's a Windows error that has been previously mapped to an */
/* ntstatus code. */
return (DWORD) (status & 0xffff);
} else {
/* The default fallback for unmappable ntstatus codes. */
return WSAEINVAL;
}
}
}
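/* Illustrative sketch (not part of the original file): the translator above is
 * typically applied to the NTSTATUS left in an IO_STATUS_BLOCK after a direct
 * AFD ioctl, and the result is reported through WSASetLastError(), mirroring
 * the workaround functions below. The 'iosb' parameter is hypothetical. */
static void report_afd_status(IO_STATUS_BLOCK* iosb) {
  int error = uv_ntstatus_to_winsock_error(iosb->Status);
  WSASetLastError(error);
}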
/*
* This function provides a workaround for a bug in the winsock implementation
* of WSARecv. The problem is that when SetFileCompletionNotificationModes is
* used to avoid IOCP notifications of completed reads, WSARecv does not
 * reliably indicate whether we can expect a completion packet to be posted
* when the receive buffer is smaller than the received datagram.
*
* However it is desirable to use SetFileCompletionNotificationModes because
* it yields a massive performance increase.
*
* This function provides a workaround for that bug, but it only works for the
* specific case that we need it for. E.g. it assumes that the "avoid iocp"
* bit has been set, and supports only overlapped operation. It also requires
* the user to use the default msafd driver, doesn't work when other LSPs are
* stacked on top of it.
*/
int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
NTSTATUS status;
void* apc_context;
IO_STATUS_BLOCK* iosb = (IO_STATUS_BLOCK*) &overlapped->Internal;
AFD_RECV_INFO info;
DWORD error;
if (overlapped == NULL || completion_routine != NULL) {
WSASetLastError(WSAEINVAL);
return SOCKET_ERROR;
}
info.BufferArray = buffers;
info.BufferCount = buffer_count;
info.AfdFlags = AFD_OVERLAPPED;
info.TdiFlags = TDI_RECEIVE_NORMAL;
if (*flags & MSG_PEEK) {
info.TdiFlags |= TDI_RECEIVE_PEEK;
}
if (*flags & MSG_PARTIAL) {
info.TdiFlags |= TDI_RECEIVE_PARTIAL;
}
if (!((intptr_t) overlapped->hEvent & 1)) {
apc_context = (void*) overlapped;
} else {
apc_context = NULL;
}
iosb->Status = STATUS_PENDING;
iosb->Pointer = 0;
status = pNtDeviceIoControlFile((HANDLE) socket,
overlapped->hEvent,
NULL,
apc_context,
iosb,
IOCTL_AFD_RECEIVE,
&info,
sizeof(info),
NULL,
0);
*flags = 0;
*bytes = (DWORD) iosb->Information;
switch (status) {
case STATUS_SUCCESS:
error = ERROR_SUCCESS;
break;
case STATUS_PENDING:
error = WSA_IO_PENDING;
break;
case STATUS_BUFFER_OVERFLOW:
error = WSAEMSGSIZE;
break;
case STATUS_RECEIVE_EXPEDITED:
error = ERROR_SUCCESS;
*flags = MSG_OOB;
break;
case STATUS_RECEIVE_PARTIAL_EXPEDITED:
error = ERROR_SUCCESS;
*flags = MSG_PARTIAL | MSG_OOB;
break;
case STATUS_RECEIVE_PARTIAL:
error = ERROR_SUCCESS;
*flags = MSG_PARTIAL;
break;
default:
error = uv_ntstatus_to_winsock_error(status);
break;
}
WSASetLastError(error);
if (error == ERROR_SUCCESS) {
return 0;
} else {
return SOCKET_ERROR;
}
}
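/* Illustrative sketch (not part of the original file): a hypothetical caller
 * that has enabled FILE_SKIP_COMPLETION_PORT_ON_SUCCESS and therefore tags the
 * low bit of hEvent, so no IOCP completion is queued for this request. 'sock',
 * 'buf', 'event_handle' and 'overlapped' are assumed to exist. */
static int start_recv(SOCKET sock, WSABUF* buf, HANDLE event_handle,
    WSAOVERLAPPED* overlapped) {
  DWORD bytes;
  DWORD flags = 0;
  ZeroMemory(overlapped, sizeof *overlapped);
  overlapped->hEvent = (HANDLE) ((uintptr_t) event_handle | 1);  /* tag */
  if (uv_wsarecv_workaround(sock, buf, 1, &bytes, &flags, overlapped, NULL) != 0 &&
      WSAGetLastError() != WSA_IO_PENDING) {
    return -1;  /* immediate failure; error code in WSAGetLastError() */
  }
  return 0;  /* completed synchronously or still pending */
}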
/* See description of uv_wsarecv_workaround. */
int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
NTSTATUS status;
void* apc_context;
IO_STATUS_BLOCK* iosb = (IO_STATUS_BLOCK*) &overlapped->Internal;
AFD_RECV_DATAGRAM_INFO info;
DWORD error;
if (overlapped == NULL || addr == NULL || addr_len == NULL ||
completion_routine != NULL) {
WSASetLastError(WSAEINVAL);
return SOCKET_ERROR;
}
info.BufferArray = buffers;
info.BufferCount = buffer_count;
info.AfdFlags = AFD_OVERLAPPED;
info.TdiFlags = TDI_RECEIVE_NORMAL;
info.Address = addr;
info.AddressLength = addr_len;
if (*flags & MSG_PEEK) {
info.TdiFlags |= TDI_RECEIVE_PEEK;
}
if (*flags & MSG_PARTIAL) {
info.TdiFlags |= TDI_RECEIVE_PARTIAL;
}
if (!((intptr_t) overlapped->hEvent & 1)) {
apc_context = (void*) overlapped;
} else {
apc_context = NULL;
}
iosb->Status = STATUS_PENDING;
iosb->Pointer = 0;
status = pNtDeviceIoControlFile((HANDLE) socket,
overlapped->hEvent,
NULL,
apc_context,
iosb,
IOCTL_AFD_RECEIVE_DATAGRAM,
&info,
sizeof(info),
NULL,
0);
*flags = 0;
*bytes = (DWORD) iosb->Information;
switch (status) {
case STATUS_SUCCESS:
error = ERROR_SUCCESS;
break;
case STATUS_PENDING:
error = WSA_IO_PENDING;
break;
case STATUS_BUFFER_OVERFLOW:
error = WSAEMSGSIZE;
break;
case STATUS_RECEIVE_EXPEDITED:
error = ERROR_SUCCESS;
*flags = MSG_OOB;
break;
case STATUS_RECEIVE_PARTIAL_EXPEDITED:
error = ERROR_SUCCESS;
*flags = MSG_PARTIAL | MSG_OOB;
break;
case STATUS_RECEIVE_PARTIAL:
error = ERROR_SUCCESS;
*flags = MSG_PARTIAL;
break;
default:
error = uv_ntstatus_to_winsock_error(status);
break;
}
WSASetLastError(error);
if (error == ERROR_SUCCESS) {
return 0;
} else {
return SOCKET_ERROR;
}
}
int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info,
OVERLAPPED* overlapped) {
IO_STATUS_BLOCK iosb;
IO_STATUS_BLOCK* iosb_ptr;
HANDLE event = NULL;
void* apc_context;
NTSTATUS status;
DWORD error;
if (overlapped != NULL) {
/* Overlapped operation. */
iosb_ptr = (IO_STATUS_BLOCK*) &overlapped->Internal;
event = overlapped->hEvent;
/* Do not report iocp completion if hEvent is tagged. */
if ((uintptr_t) event & 1) {
event = (HANDLE)((uintptr_t) event & ~(uintptr_t) 1);
apc_context = NULL;
} else {
apc_context = overlapped;
}
} else {
/* Blocking operation. */
iosb_ptr = &iosb;
event = CreateEvent(NULL, FALSE, FALSE, NULL);
if (event == NULL) {
return SOCKET_ERROR;
}
apc_context = NULL;
}
iosb_ptr->Status = STATUS_PENDING;
status = pNtDeviceIoControlFile((HANDLE) socket,
event,
NULL,
apc_context,
iosb_ptr,
IOCTL_AFD_POLL,
info,
sizeof *info,
info,
sizeof *info);
if (overlapped == NULL) {
/* If this is a blocking operation, wait for the event to become */
/* signaled, and then grab the real status from the io status block. */
if (status == STATUS_PENDING) {
DWORD r = WaitForSingleObject(event, INFINITE);
if (r == WAIT_FAILED) {
DWORD saved_error = GetLastError();
CloseHandle(event);
WSASetLastError(saved_error);
return SOCKET_ERROR;
}
status = iosb.Status;
}
CloseHandle(event);
}
switch (status) {
case STATUS_SUCCESS:
error = ERROR_SUCCESS;
break;
case STATUS_PENDING:
error = WSA_IO_PENDING;
break;
default:
error = uv_ntstatus_to_winsock_error(status);
break;
}
WSASetLastError(error);
if (error == ERROR_SUCCESS) {
return 0;
} else {
return SOCKET_ERROR;
}
}


@ -1,171 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_WINSOCK_H_
#define UV_WIN_WINSOCK_H_
#include <winsock2.h>
#include <iptypes.h>
#include <mswsock.h>
#include <ws2tcpip.h>
#include <windows.h>
#include "winapi.h"
/*
* MinGW is missing these too
*/
#ifndef SO_UPDATE_CONNECT_CONTEXT
# define SO_UPDATE_CONNECT_CONTEXT 0x7010
#endif
#ifndef TCP_KEEPALIVE
# define TCP_KEEPALIVE 3
#endif
#ifndef IPV6_V6ONLY
# define IPV6_V6ONLY 27
#endif
#ifndef IPV6_HOPLIMIT
# define IPV6_HOPLIMIT 21
#endif
#ifndef SIO_BASE_HANDLE
# define SIO_BASE_HANDLE 0x48000022
#endif
/*
* TDI defines that are only in the DDK.
* We only need receive flags so far.
*/
#ifndef TDI_RECEIVE_NORMAL
#define TDI_RECEIVE_BROADCAST 0x00000004
#define TDI_RECEIVE_MULTICAST 0x00000008
#define TDI_RECEIVE_PARTIAL 0x00000010
#define TDI_RECEIVE_NORMAL 0x00000020
#define TDI_RECEIVE_EXPEDITED 0x00000040
#define TDI_RECEIVE_PEEK 0x00000080
#define TDI_RECEIVE_NO_RESPONSE_EXP 0x00000100
#define TDI_RECEIVE_COPY_LOOKAHEAD 0x00000200
#define TDI_RECEIVE_ENTIRE_MESSAGE 0x00000400
#define TDI_RECEIVE_AT_DISPATCH_LEVEL 0x00000800
#define TDI_RECEIVE_CONTROL_INFO 0x00001000
#define TDI_RECEIVE_FORCE_INDICATION 0x00002000
#define TDI_RECEIVE_NO_PUSH 0x00004000
#endif
/*
* The "Auxiliary Function Driver" is the windows kernel-mode driver that does
* TCP, UDP etc. Winsock is just a layer that dispatches requests to it.
* Having these definitions allows us to bypass winsock and make an AFD kernel
* call directly, avoiding a bug in winsock's recvfrom implementation.
*/
#define AFD_NO_FAST_IO 0x00000001
#define AFD_OVERLAPPED 0x00000002
#define AFD_IMMEDIATE 0x00000004
#define AFD_POLL_RECEIVE_BIT 0
#define AFD_POLL_RECEIVE (1 << AFD_POLL_RECEIVE_BIT)
#define AFD_POLL_RECEIVE_EXPEDITED_BIT 1
#define AFD_POLL_RECEIVE_EXPEDITED (1 << AFD_POLL_RECEIVE_EXPEDITED_BIT)
#define AFD_POLL_SEND_BIT 2
#define AFD_POLL_SEND (1 << AFD_POLL_SEND_BIT)
#define AFD_POLL_DISCONNECT_BIT 3
#define AFD_POLL_DISCONNECT (1 << AFD_POLL_DISCONNECT_BIT)
#define AFD_POLL_ABORT_BIT 4
#define AFD_POLL_ABORT (1 << AFD_POLL_ABORT_BIT)
#define AFD_POLL_LOCAL_CLOSE_BIT 5
#define AFD_POLL_LOCAL_CLOSE (1 << AFD_POLL_LOCAL_CLOSE_BIT)
#define AFD_POLL_CONNECT_BIT 6
#define AFD_POLL_CONNECT (1 << AFD_POLL_CONNECT_BIT)
#define AFD_POLL_ACCEPT_BIT 7
#define AFD_POLL_ACCEPT (1 << AFD_POLL_ACCEPT_BIT)
#define AFD_POLL_CONNECT_FAIL_BIT 8
#define AFD_POLL_CONNECT_FAIL (1 << AFD_POLL_CONNECT_FAIL_BIT)
#define AFD_POLL_QOS_BIT 9
#define AFD_POLL_QOS (1 << AFD_POLL_QOS_BIT)
#define AFD_POLL_GROUP_QOS_BIT 10
#define AFD_POLL_GROUP_QOS (1 << AFD_POLL_GROUP_QOS_BIT)
#define AFD_NUM_POLL_EVENTS 11
#define AFD_POLL_ALL ((1 << AFD_NUM_POLL_EVENTS) - 1)
typedef struct _AFD_RECV_DATAGRAM_INFO {
LPWSABUF BufferArray;
ULONG BufferCount;
ULONG AfdFlags;
ULONG TdiFlags;
struct sockaddr* Address;
int* AddressLength;
} AFD_RECV_DATAGRAM_INFO, *PAFD_RECV_DATAGRAM_INFO;
typedef struct _AFD_RECV_INFO {
LPWSABUF BufferArray;
ULONG BufferCount;
ULONG AfdFlags;
ULONG TdiFlags;
} AFD_RECV_INFO, *PAFD_RECV_INFO;
#define _AFD_CONTROL_CODE(operation, method) \
((FSCTL_AFD_BASE) << 12 | (operation << 2) | method)
#define FSCTL_AFD_BASE FILE_DEVICE_NETWORK
#define AFD_RECEIVE 5
#define AFD_RECEIVE_DATAGRAM 6
#define AFD_POLL 9
#define IOCTL_AFD_RECEIVE \
_AFD_CONTROL_CODE(AFD_RECEIVE, METHOD_NEITHER)
#define IOCTL_AFD_RECEIVE_DATAGRAM \
_AFD_CONTROL_CODE(AFD_RECEIVE_DATAGRAM, METHOD_NEITHER)
#define IOCTL_AFD_POLL \
_AFD_CONTROL_CODE(AFD_POLL, METHOD_BUFFERED)
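/* Worked expansion of the control codes above (constants from the Windows DDK:
 * FILE_DEVICE_NETWORK == 0x12, METHOD_NEITHER == 3, METHOD_BUFFERED == 0):
 *   IOCTL_AFD_RECEIVE          == (0x12 << 12) | (5 << 2) | 3 == 0x12017
 *   IOCTL_AFD_RECEIVE_DATAGRAM == (0x12 << 12) | (6 << 2) | 3 == 0x1201b
 *   IOCTL_AFD_POLL             == (0x12 << 12) | (9 << 2) | 0 == 0x12024
 */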
#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)
typedef struct _IP_ADAPTER_UNICAST_ADDRESS_XP {
/* FIXME: __C89_NAMELESS was removed */
/* __C89_NAMELESS */ union {
ULONGLONG Alignment;
/* __C89_NAMELESS */ struct {
ULONG Length;
DWORD Flags;
};
};
struct _IP_ADAPTER_UNICAST_ADDRESS_XP *Next;
SOCKET_ADDRESS Address;
IP_PREFIX_ORIGIN PrefixOrigin;
IP_SUFFIX_ORIGIN SuffixOrigin;
IP_DAD_STATE DadState;
ULONG ValidLifetime;
ULONG PreferredLifetime;
ULONG LeaseLifetime;
} IP_ADAPTER_UNICAST_ADDRESS_XP,*PIP_ADAPTER_UNICAST_ADDRESS_XP;
#endif
#endif /* UV_WIN_WINSOCK_H_ */


@ -1,119 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "task.h"
#include "uv.h"
#include <stdio.h>
#include <stdlib.h>
#define NUM_PINGS (1000 * 1000)
#define ACCESS_ONCE(type, var) (*(volatile type*) &(var))
static unsigned int callbacks;
static volatile int done;
static const char running[] = "running";
static const char stop[] = "stop";
static const char stopped[] = "stopped";
static void async_cb(uv_async_t* handle, int status) {
if (++callbacks == NUM_PINGS) {
/* Tell the pummel thread to stop. */
ACCESS_ONCE(const char*, handle->data) = stop;
    /* Wait for the pummel thread to acknowledge that it has stopped. */
while (ACCESS_ONCE(const char*, handle->data) != stopped)
uv_sleep(0);
uv_close((uv_handle_t*) handle, NULL);
}
}
static void pummel(void* arg) {
uv_async_t* handle = (uv_async_t*) arg;
while (ACCESS_ONCE(const char*, handle->data) == running)
uv_async_send(handle);
/* Acknowledge that we've seen handle->data change. */
ACCESS_ONCE(const char*, handle->data) = stopped;
}
static int test_async_pummel(int nthreads) {
uv_thread_t* tids;
uv_async_t handle;
uint64_t time;
int i;
tids = calloc(nthreads, sizeof(tids[0]));
ASSERT(tids != NULL);
ASSERT(0 == uv_async_init(uv_default_loop(), &handle, async_cb));
ACCESS_ONCE(const char*, handle.data) = running;
for (i = 0; i < nthreads; i++)
ASSERT(0 == uv_thread_create(tids + i, pummel, &handle));
time = uv_hrtime();
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
time = uv_hrtime() - time;
done = 1;
for (i = 0; i < nthreads; i++)
ASSERT(0 == uv_thread_join(tids + i));
printf("async_pummel_%d: %s callbacks in %.2f seconds (%s/sec)\n",
nthreads,
fmt(callbacks),
time / 1e9,
fmt(callbacks / (time / 1e9)));
free(tids);
MAKE_VALGRIND_HAPPY();
return 0;
}
BENCHMARK_IMPL(async_pummel_1) {
return test_async_pummel(1);
}
BENCHMARK_IMPL(async_pummel_2) {
return test_async_pummel(2);
}
BENCHMARK_IMPL(async_pummel_4) {
return test_async_pummel(4);
}
BENCHMARK_IMPL(async_pummel_8) {
return test_async_pummel(8);
}


@ -1,139 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "task.h"
#include "uv.h"
#include <stdio.h>
#include <stdlib.h>
#define NUM_PINGS (1000 * 1000)
struct ctx {
uv_loop_t* loop;
uv_thread_t thread;
uv_async_t main_async; /* wake up main thread */
uv_async_t worker_async; /* wake up worker */
unsigned int nthreads;
unsigned int main_sent;
unsigned int main_seen;
unsigned int worker_sent;
unsigned int worker_seen;
};
static void worker_async_cb(uv_async_t* handle, int status) {
struct ctx* ctx = container_of(handle, struct ctx, worker_async);
ASSERT(0 == uv_async_send(&ctx->main_async));
ctx->worker_sent++;
ctx->worker_seen++;
if (ctx->worker_sent >= NUM_PINGS)
uv_close((uv_handle_t*) &ctx->worker_async, NULL);
}
static void main_async_cb(uv_async_t* handle, int status) {
struct ctx* ctx = container_of(handle, struct ctx, main_async);
ASSERT(0 == uv_async_send(&ctx->worker_async));
ctx->main_sent++;
ctx->main_seen++;
if (ctx->main_sent >= NUM_PINGS)
uv_close((uv_handle_t*) &ctx->main_async, NULL);
}
static void worker(void* arg) {
struct ctx* ctx = arg;
ASSERT(0 == uv_async_send(&ctx->main_async));
ASSERT(0 == uv_run(ctx->loop, UV_RUN_DEFAULT));
}
static int test_async(int nthreads) {
struct ctx* threads;
struct ctx* ctx;
uint64_t time;
int i;
threads = calloc(nthreads, sizeof(threads[0]));
ASSERT(threads != NULL);
for (i = 0; i < nthreads; i++) {
ctx = threads + i;
ctx->nthreads = nthreads;
ctx->loop = uv_loop_new();
ASSERT(ctx->loop != NULL);
ASSERT(0 == uv_async_init(ctx->loop, &ctx->worker_async, worker_async_cb));
ASSERT(0 == uv_async_init(uv_default_loop(), &ctx->main_async, main_async_cb));
ASSERT(0 == uv_thread_create(&ctx->thread, worker, ctx));
}
time = uv_hrtime();
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
for (i = 0; i < nthreads; i++)
ASSERT(0 == uv_thread_join(&threads[i].thread));
time = uv_hrtime() - time;
for (i = 0; i < nthreads; i++) {
ctx = threads + i;
ASSERT(ctx->worker_sent == NUM_PINGS);
ASSERT(ctx->worker_seen == NUM_PINGS);
ASSERT(ctx->main_sent == (unsigned int) NUM_PINGS);
ASSERT(ctx->main_seen == (unsigned int) NUM_PINGS);
}
printf("async%d: %.2f sec (%s/sec)\n",
nthreads,
time / 1e9,
fmt(NUM_PINGS / (time / 1e9)));
free(threads);
MAKE_VALGRIND_HAPPY();
return 0;
}
BENCHMARK_IMPL(async1) {
return test_async(1);
}
BENCHMARK_IMPL(async2) {
return test_async(2);
}
BENCHMARK_IMPL(async4) {
return test_async(4);
}
BENCHMARK_IMPL(async8) {
return test_async(8);
}


@ -1,136 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "task.h"
#include "uv.h"
#include <stdio.h>
#include <stdlib.h>
#define NUM_SYNC_REQS (10 * 1e5)
#define NUM_ASYNC_REQS (1 * (int) 1e5)
#define MAX_CONCURRENT_REQS 32
#define sync_stat(req, path) \
do { \
uv_fs_stat(uv_default_loop(), (req), (path), NULL); \
uv_fs_req_cleanup((req)); \
} \
while (0)
struct async_req {
const char* path;
uv_fs_t fs_req;
int* count;
};
static void warmup(const char* path) {
uv_fs_t reqs[MAX_CONCURRENT_REQS];
unsigned int i;
/* warm up the thread pool */
for (i = 0; i < ARRAY_SIZE(reqs); i++)
uv_fs_stat(uv_default_loop(), reqs + i, path, uv_fs_req_cleanup);
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
/* warm up the OS dirent cache */
for (i = 0; i < 16; i++)
sync_stat(reqs + 0, path);
}
static void sync_bench(const char* path) {
uint64_t before;
uint64_t after;
uv_fs_t req;
int i;
/* do the sync benchmark */
before = uv_hrtime();
for (i = 0; i < NUM_SYNC_REQS; i++)
sync_stat(&req, path);
after = uv_hrtime();
printf("%s stats (sync): %.2fs (%s/s)\n",
fmt(1.0 * NUM_SYNC_REQS),
(after - before) / 1e9,
fmt((1.0 * NUM_SYNC_REQS) / ((after - before) / 1e9)));
fflush(stdout);
}
static void stat_cb(uv_fs_t* fs_req) {
struct async_req* req = container_of(fs_req, struct async_req, fs_req);
uv_fs_req_cleanup(&req->fs_req);
if (*req->count == 0) return;
uv_fs_stat(uv_default_loop(), &req->fs_req, req->path, stat_cb);
(*req->count)--;
}
static void async_bench(const char* path) {
struct async_req reqs[MAX_CONCURRENT_REQS];
struct async_req* req;
uint64_t before;
uint64_t after;
int count;
int i;
for (i = 1; i <= MAX_CONCURRENT_REQS; i++) {
count = NUM_ASYNC_REQS;
for (req = reqs; req < reqs + i; req++) {
req->path = path;
req->count = &count;
uv_fs_stat(uv_default_loop(), &req->fs_req, req->path, stat_cb);
}
before = uv_hrtime();
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
after = uv_hrtime();
printf("%s stats (%d concurrent): %.2fs (%s/s)\n",
fmt(1.0 * NUM_ASYNC_REQS),
i,
(after - before) / 1e9,
fmt((1.0 * NUM_ASYNC_REQS) / ((after - before) / 1e9)));
fflush(stdout);
}
}
/* This benchmark aims to measure the overhead of doing I/O syscalls from
* the thread pool. The stat() syscall was chosen because its results are
* easy for the operating system to cache, taking the actual I/O overhead
* out of the equation.
*/
BENCHMARK_IMPL(fs_stat) {
const char path[] = ".";
warmup(path);
sync_bench(path);
async_bench(path);
MAKE_VALGRIND_HAPPY();
return 0;
}


@ -1,91 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
#include <stdlib.h>
#define CONCURRENT_CALLS 10
#define TOTAL_CALLS 10000
static const char* name = "localhost";
static uv_loop_t* loop;
static uv_getaddrinfo_t handles[CONCURRENT_CALLS];
static int calls_initiated = 0;
static int calls_completed = 0;
static int64_t start_time;
static int64_t end_time;
static void getaddrinfo_initiate(uv_getaddrinfo_t* handle);
static void getaddrinfo_cb(uv_getaddrinfo_t* handle, int status,
struct addrinfo* res) {
ASSERT(status == 0);
calls_completed++;
if (calls_initiated < TOTAL_CALLS) {
getaddrinfo_initiate(handle);
}
uv_freeaddrinfo(res);
}
static void getaddrinfo_initiate(uv_getaddrinfo_t* handle) {
int r;
calls_initiated++;
r = uv_getaddrinfo(loop, handle, &getaddrinfo_cb, name, NULL, NULL);
ASSERT(r == 0);
}
BENCHMARK_IMPL(getaddrinfo) {
int i;
loop = uv_default_loop();
uv_update_time(loop);
start_time = uv_now(loop);
for (i = 0; i < CONCURRENT_CALLS; i++) {
getaddrinfo_initiate(&handles[i]);
}
uv_run(loop, UV_RUN_DEFAULT);
uv_update_time(loop);
end_time = uv_now(loop);
ASSERT(calls_initiated == TOTAL_CALLS);
ASSERT(calls_completed == TOTAL_CALLS);
LOGF("getaddrinfo: %.0f req/s\n",
(double) calls_completed / (double) (end_time - start_time) * 1000.0);
MAKE_VALGRIND_HAPPY();
return 0;
}

Some files were not shown because too many files have changed in this diff.