added libuv 11

TJIC 2014-08-20 14:11:02 -04:00
parent 12abde0fdf
commit ab533cc671
262 changed files with 75730 additions and 0 deletions

63
outside/libuv_11/.gitignore vendored Normal file
@@ -0,0 +1,63 @@
*.swp
*.[oa]
*.l[oa]
*.opensdf
*.orig
*.pyc
*.sdf
*.suo
core
vgcore.*
.buildstamp
.dirstamp
.deps/
/.libs/
/aclocal.m4
/ar-lib
/autom4te.cache/
/compile
/config.guess
/config.log
/config.status
/config.sub
/configure
/depcomp
/install-sh
/libtool
/libuv.a
/libuv.dylib
/libuv.pc
/libuv.so
/ltmain.sh
/missing
/test-driver
Makefile
Makefile.in
# Generated by dtrace(1) when doing an in-tree build.
/include/uv-dtrace.h
# Generated by gyp for android
*.target.mk
/out/
/build/gyp
/test/.libs/
/test/run-tests
/test/run-tests.exe
/test/run-tests.dSYM
/test/run-benchmarks
/test/run-benchmarks.exe
/test/run-benchmarks.dSYM
*.sln
*.vcproj
*.vcxproj
*.vcxproj.filters
*.vcxproj.user
_UpgradeReport_Files/
UpgradeLog*.XML
Debug
Release
ipch

34
outside/libuv_11/.mailmap Normal file
@@ -0,0 +1,34 @@
Aaron Bieber <qbit@deftly.net> <deftly@gmail.com>
Alan Gutierrez <alan@prettyrobots.com> <alan@blogometer.com>
Andrius Bentkus <andrius.bentkus@gmail.com> <toxedvirus@gmail.com>
Bert Belder <bertbelder@gmail.com> <info@2bs.nl>
Bert Belder <bertbelder@gmail.com> <user@ChrUbuntu.(none)>
Brandon Philips <brandon.philips@rackspace.com> <brandon@ifup.org>
Brian White <mscdex@mscdex.net>
Brian White <mscdex@mscdex.net> <mscdex@gmail.com>
Caleb James DeLisle <cjd@hyperboria.ca> <cjd@cjdns.fr>
Christoph Iserlohn <christoph.iserlohn@innoq.com>
Fedor Indutny <fedor.indutny@gmail.com> <fedor@indutny.com>
Frank Denis <github@pureftpd.org>
Isaac Z. Schlueter <i@izs.me>
Justin Venus <justin.venus@gmail.com> <justin.venus@orbitz.com>
Keno Fischer <kenof@stanford.edu> <kfischer+github@college.harvard.edu>
Keno Fischer <kenof@stanford.edu> <kfischer@college.harvard.edu>
Maciej Małecki <maciej.malecki@notimplemented.org> <me@mmalecki.com>
Marc Schlaich <marc.schlaich@googlemail.com> <marc.schlaich@gmail.com>
Rasmus Christian Pedersen <ruysch@outlook.com>
Rasmus Christian Pedersen <ruysch@outlook.com>
Rasmus Christian Pedersen <ruysch@outlook.com>
Rasmus Christian Pedersen <zerhacken@yahoo.com> <ruysch@outlook.com>
Rasmus Pedersen <ruysch@outlook.com> <zerhacken@yahoo.com>
Robert Mustacchi <rm@joyent.com> <rm@fingolfin.org>
Ryan Dahl <ryan@joyent.com> <ry@tinyclouds.org>
Ryan Emery <seebees@gmail.com>
Sam Roberts <vieuxtech@gmail.com> <sam@strongloop.com>
San-Tai Hsu <vanilla@fatpipi.com>
Saúl Ibarra Corretgé <saghul@gmail.com>
Shigeki Ohtsu <ohtsu@iij.ad.jp> <ohtsu@ohtsu.org>
Timothy J. Fontaine <tjfontaine@gmail.com>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Yazhong Liu <yorkiefixer@gmail.com>
Yuki Okumura <mjt@cltn.org>

157
outside/libuv_11/AUTHORS Normal file
@@ -0,0 +1,157 @@
# Authors ordered by first contribution.
Ryan Dahl <ryan@joyent.com>
Bert Belder <bertbelder@gmail.com>
Josh Roesslein <jroesslein@gmail.com>
Alan Gutierrez <alan@prettyrobots.com>
Joshua Peek <josh@joshpeek.com>
Igor Zinkovsky <igorzi@microsoft.com>
San-Tai Hsu <vanilla@fatpipi.com>
Ben Noordhuis <info@bnoordhuis.nl>
Henry Rawas <henryr@schakra.com>
Robert Mustacchi <rm@joyent.com>
Matt Stevens <matt@alloysoft.com>
Paul Querna <pquerna@apache.org>
Shigeki Ohtsu <ohtsu@iij.ad.jp>
Tom Hughes <tom.hughes@palm.com>
Peter Bright <drpizza@quiscalusmexicanus.org>
Jeroen Janssen <jeroen.janssen@gmail.com>
Andrea Lattuada <ndr.lattuada@gmail.com>
Augusto Henrique Hentz <ahhentz@gmail.com>
Clifford Heath <clifford.heath@gmail.com>
Jorge Chamorro Bieling <jorge@jorgechamorro.com>
Luis Lavena <luislavena@gmail.com>
Matthew Sporleder <msporleder@gmail.com>
Erick Tryzelaar <erick.tryzelaar@gmail.com>
Isaac Z. Schlueter <i@izs.me>
Pieter Noordhuis <pcnoordhuis@gmail.com>
Marek Jelen <marek@jelen.biz>
Fedor Indutny <fedor.indutny@gmail.com>
Saúl Ibarra Corretgé <saghul@gmail.com>
Felix Geisendörfer <felix@debuggable.com>
Yuki Okumura <mjt@cltn.org>
Roman Shtylman <shtylman@gmail.com>
Frank Denis <github@pureftpd.org>
Carter Allen <CarterA@opt-6.com>
Tj Holowaychuk <tj@vision-media.ca>
Shimon Doodkin <helpmepro1@gmail.com>
Ryan Emery <seebees@gmail.com>
Bruce Mitchener <bruce.mitchener@gmail.com>
Maciej Małecki <maciej.malecki@notimplemented.org>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Daisuke Murase <typester@cpan.org>
Paddy Byers <paddy.byers@gmail.com>
Dan VerWeire <dverweire@gmail.com>
Brandon Benvie <brandon@bbenvie.com>
Brandon Philips <brandon.philips@rackspace.com>
Nathan Rajlich <nathan@tootallnate.net>
Charlie McConnell <charlie@charlieistheman.com>
Vladimir Dronnikov <dronnikov@gmail.com>
Aaron Bieber <qbit@deftly.net>
Bulat Shakirzyanov <mallluhuct@gmail.com>
Brian White <mscdex@mscdex.net>
Erik Dubbelboer <erik@dubbelboer.com>
Keno Fischer <kenof@stanford.edu>
Ira Cooper <Ira.Cooper@mathworks.com>
Andrius Bentkus <andrius.bentkus@gmail.com>
Iñaki Baz Castillo <ibc@aliax.net>
Mark Cavage <mark.cavage@joyent.com>
George Yohng <georgegh@oss3d.com>
Xidorn Quan <quanxunzhen@gmail.com>
Roman Neuhauser <rneuhauser@suse.cz>
Shuhei Tanuma <shuhei.tanuma@gmail.com>
Bryan Cantrill <bcantrill@acm.org>
Trond Norbye <trond.norbye@gmail.com>
Tim Holy <holy@wustl.edu>
Prancesco Pertugio <meh@schizofreni.co>
Leonard Hecker <leonard.hecker91@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Luigi Grilli <luigi.grilli@gmail.com>
Shannen Saez <shannenlaptop@gmail.com>
Artur Adib <arturadib@gmail.com>
Hiroaki Nakamura <hnakamur@gmail.com>
Ting-Yu Lin <ph.minamo@cytisan.com>
Stephen Gallagher <sgallagh@redhat.com>
Shane Holloway <shane.holloway@ieee.org>
Andrew Shaffer <darawk@gmail.com>
Vlad Tudose <vlad.tudose@intel.com>
Ben Leslie <benno@benno.id.au>
Tim Bradshaw <tfb@cley.com>
Timothy J. Fontaine <tjfontaine@gmail.com>
Marc Schlaich <marc.schlaich@googlemail.com>
Brian Mazza <louseman@gmail.com>
Elliot Saba <staticfloat@gmail.com>
Ben Kelly <ben@wanderview.com>
Nils Maier <maierman@web.de>
Nicholas Vavilov <vvnicholas@gmail.com>
Miroslav Bajtoš <miro.bajtos@gmail.com>
Sean Silva <chisophugis@gmail.com>
Wynn Wilkes <wynnw@movenetworks.com>
Linus Mårtensson <linus.martensson@sonymobile.com>
Andrei Sedoi <bsnote@gmail.com>
Navaneeth Kedaram Nambiathan <navaneethkn@gmail.com>
Alex Crichton <alex@alexcrichton.com>
Brent Cook <brent@boundary.com>
Brian Kaisner <bkize1@gmail.com>
Luca Bruno <lucab@debian.org>
Reini Urban <rurban@cpanel.net>
Maks Naumov <maksqwe1@ukr.net>
Sean Farrell <sean.farrell@rioki.org>
Chris Bank <cbank@adobe.com>
Geert Jansen <geertj@gmail.com>
Christoph Iserlohn <christoph.iserlohn@innoq.com>
Steven Kabbes <stevenkabbes@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
huxingyi <huxingyi@msn.com>
Tenor Biel <tenorbiel@gmail.com>
Andrej Manduch <AManduch@gmail.com>
Joshua Neuheisel <joshua@neuheisel.us>
Alexis Campailla <alexis@janeasystems.com>
Yazhong Liu <yorkiefixer@gmail.com>
Sam Roberts <vieuxtech@gmail.com>
River Tarnell <river@loreley.flyingparchment.org.uk>
Nathan Sweet <nathanjsweet@gmail.com>
Luca Bruno <lucab@debian.org>
Trevor Norris <trev.norris@gmail.com>
Oguz Bastemur <obastemur@gmail.com>
Dylan Cali <calid1984@gmail.com>
Austin Foxley <austinf@cetoncorp.com>
Benjamin Saunders <ben.e.saunders@gmail.com>
Geoffry Song <goffrie@gmail.com>
Rasmus Christian Pedersen <ruysch@outlook.com>
William Light <wrl@illest.net>
Oleg Efimov <o.efimov@corp.badoo.com>
Lars Gierth <larsg@systemli.org>
Rasmus Christian Pedersen <zerhacken@yahoo.com>
Justin Venus <justin.venus@gmail.com>
Kristian Evensen <kristian.evensen@gmail.com>
Linus Mårtensson <linus.martensson@sonymobile.com>
Navaneeth Kedaram Nambiathan <navaneethkn@gmail.com>
Yorkie <yorkiefixer@gmail.com>
StarWing <weasley.wx@gmail.com>
thierry-FreeBSD <thierry@FreeBSD.org>
Isaiah Norton <isaiah.norton@gmail.com>
Raul Martins <raulms.martins@gmail.com>
David Capello <davidcapello@gmail.com>
Paul Tan <pyokagan@gmail.com>
Javier Hernández <jhernandez@emergya.com>
Tonis Tiigi <tonistiigi@gmail.com>
Norio Kobota <nori.0428@gmail.com>
李港平 <chopdown@gmail.com>
Chernyshev Viacheslav <astellar@ro.ru>
Stephen von Takach <steve@advancedcontrol.com.au>
JD Ballard <jd@pixelandline.com>
Luka Perkov <luka.perkov@sartura.hr>
Ryan Cole <ryan@rycole.com>
HungMingWu <u9089000@gmail.com>
Jay Satiro <raysatiro@yahoo.com>
Leith Bade <leith@leithalweapon.geek.nz>
Peter Atashian <retep998@gmail.com>
Tim Cooper <tim.cooper@layeh.com>
Caleb James DeLisle <cjd@hyperboria.ca>
Jameson Nash <vtjnash@gmail.com>
Graham Lee <ghmlee@ghmlee.com>
Andrew Low <Andrew_Low@ca.ibm.com>
Pavel Platto <hinidu@gmail.com>
Tony Kelman <tony@kelman.net>
John Firebaugh <john.firebaugh@gmail.com>
lilohuang <lilohuang@hotmail.com>

@@ -0,0 +1,166 @@
# CONTRIBUTING
The libuv project welcomes new contributors. This document will guide you
through the process.
### FORK
Fork the project [on GitHub](https://github.com/joyent/libuv) and check out
your copy.
```
$ git clone https://github.com/username/libuv.git
$ cd libuv
$ git remote add upstream https://github.com/joyent/libuv.git
```
Now decide if you want your feature or bug fix to go into the master branch
or the stable branch. As a rule of thumb, bug fixes go into the stable branch
while new features go into the master branch.
The stable branch is effectively frozen; patches that change the libuv
API/ABI or affect the run-time behavior of applications get rejected.
In case of doubt, open an issue in the [issue tracker][], post your question
to the [libuv mailing list], or contact one of the project maintainers
(@bnoordhuis, @piscisaureus, @indutny or @saghul) on [IRC][].
Especially do so if you plan to work on something big. Nothing is more
frustrating than seeing your hard work go to waste because your vision
does not align with that of the project maintainers.
### BRANCH
Okay, so you have decided on the proper branch. Create a feature branch
and start hacking:
```
$ git checkout -b my-feature-branch -t origin/v0.10
```
(Where v0.10 is the latest stable branch as of this writing.)
### CODE
Please adhere to libuv's code style. In general it follows the conventions from
the [Google C/C++ style guide]. Some of the key points, as well as some
additional guidelines, are enumerated below.
* Code that is specific to unix-y platforms should be placed in `src/unix`, and
declarations go into `include/uv-unix.h`.
* Source code that is Windows-specific goes into `src/win`, and related
publicly exported types, functions and macro declarations should generally
be declared in `include/uv-win.h`.
* Names should be descriptive and concise.
* All the symbols and types that libuv makes available publicly should be
prefixed with `uv_` (or `UV_` in case of macros).
* Internal, non-static functions should be prefixed with `uv__`.
* Use two spaces and no tabs.
* Lines should be wrapped at 80 characters.
* Ensure that lines have no trailing whitespace, and use unix-style (LF) line
endings.
* Use C89-compliant syntax. In other words, variables can only be declared at
the top of a scope (function, if/for/while-block); see the sketch after this list.
* When writing comments, use properly constructed sentences, including
punctuation.
* When documenting APIs and/or source code, don't make assumptions or make
implications about race, gender, religion, political orientation or anything
else that isn't relevant to the project.
* Remember that source code usually gets written once and read often: ensure
the reader doesn't have to make guesses. Make sure that the purpose and inner
logic are either obvious to a reasonably skilled professional, or add a
comment that explains it.
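To make these rules concrete, here is a small illustrative fragment (not actual
libuv source; `uv_example_sum`, `uv__clamp` and `UV_EXAMPLE_MAX` are made-up
names used only to demonstrate the conventions):
```c
/* Illustrative only: made-up names showing the style rules above. */

/* Public macros use the UV_ prefix. */
#define UV_EXAMPLE_MAX 16

/* Public functions use the uv_ prefix. */
int uv_example_sum(const int* values, int n);

/* Internal helpers use the uv__ prefix. */
static int uv__clamp(int value) {
  int result;  /* C89: declarations only at the top of a scope */

  result = value;
  if (result > UV_EXAMPLE_MAX)
    result = UV_EXAMPLE_MAX;
  return result;
}

int uv_example_sum(const int* values, int n) {
  int i;
  int total;  /* two-space indent, no tabs, lines under 80 columns */

  total = 0;
  for (i = 0; i < n; i++)
    total += uv__clamp(values[i]);
  return total;
}
```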
### COMMIT
Make sure git knows your name and email address:
```
$ git config --global user.name "J. Random User"
$ git config --global user.email "j.random.user@example.com"
```
Writing good commit logs is important. A commit log should describe what
changed and why. Follow these guidelines when writing one:
1. The first line should be 50 characters or less and contain a short
description of the change prefixed with the name of the changed
subsystem (e.g. "net: add localAddress and localPort to Socket").
2. Keep the second line blank.
3. Wrap all other lines at 72 columns.
A good commit log looks like this:
```
subsystem: explaining the commit in one line
Body of commit message is a few lines of text, explaining things
in more detail, possibly giving some background about the issue
being fixed, etc etc.
The body of the commit message can be several paragraphs, and
please do proper word-wrap and keep columns shorter than about
72 characters or so. That way `git log` will show things
nicely even when it is indented.
```
The header line should be meaningful; it is what other people see when they
run `git shortlog` or `git log --oneline`.
Check the output of `git log --oneline files_that_you_changed` to find out
what subsystem (or subsystems) your changes touch.
### REBASE
Use `git rebase` (not `git merge`) to sync your work from time to time.
```
$ git fetch upstream
$ git rebase upstream/v0.10 # or upstream/master
```
### TEST
Bug fixes and features should come with tests. Add your tests in the
`test/` directory. Tests also need to be registered in `test/test-list.h`.
Look at other tests to see how they should be structured (license boilerplate,
the way entry points are declared, etc.).
Check the README.md file to find out how to run the test suite and make sure
that there are no test regressions.
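For orientation, a new test usually takes the shape sketched below (modeled on
the existing tests; the `TEST_IMPL`, `ASSERT` and `MAKE_VALGRIND_HAPPY` macros
come from `test/task.h`, the file name is hypothetical, and the license
boilerplate is omitted):
```c
/* test/test-example.c -- hypothetical name; license boilerplate omitted. */
#include "uv.h"
#include "task.h"

TEST_IMPL(example) {
  uv_loop_t* loop;

  loop = uv_default_loop();
  ASSERT(loop != NULL);

  /* Nothing is registered, so the loop finishes immediately. */
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));

  MAKE_VALGRIND_HAPPY();
  return 0;
}
```
The new test is then registered in `test/test-list.h` with matching
`TEST_DECLARE` and `TEST_ENTRY` lines, alongside the entries already there.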
### PUSH
```
$ git push origin my-feature-branch
```
Go to https://github.com/username/libuv and select your feature branch. Click
the 'Pull Request' button and fill out the form.
Pull requests are usually reviewed within a few days. If there are comments
to address, apply your changes in a separate commit and push that to your
feature branch. Post a comment in the pull request afterwards; GitHub does
not send out notifications when you add commits.
[issue tracker]: https://github.com/joyent/libuv/issues
[libuv mailing list]: http://groups.google.com/group/libuv
[IRC]: http://webchat.freenode.net/?channels=libuv
[Google C/C++ style guide]: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml

1380
outside/libuv_11/ChangeLog Normal file

File diff suppressed because it is too large.

46
outside/libuv_11/LICENSE Normal file
@@ -0,0 +1,46 @@
libuv is part of the Node project: http://nodejs.org/
libuv may be distributed alone under Node's license:
====
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to all parts of libuv that are not externally
maintained libraries.
The externally maintained libraries used by libuv are:
- tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
- inet_pton and inet_ntop implementations, contained in src/inet.c, are
copyright the Internet Systems Consortium, Inc., and licensed under the ISC
license.
- stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
clause BSD license.
- pthread-fixes.h, pthread-fixes.c, copyright Google Inc. and Sony Mobile
Communications AB. Three clause BSD license.
- android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
n° 289016). Three clause BSD license.

@@ -0,0 +1,336 @@
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ACLOCAL_AMFLAGS = -I m4
AM_CPPFLAGS = -I$(top_srcdir)/include \
-I$(top_srcdir)/src
include_HEADERS=include/uv.h include/uv-errno.h include/uv-threadpool.h include/uv-version.h
CLEANFILES =
lib_LTLIBRARIES = libuv.la
libuv_la_CFLAGS = @CFLAGS@
libuv_la_LDFLAGS = -no-undefined -version-info 11:0:0
libuv_la_SOURCES = src/fs-poll.c \
src/heap-inl.h \
src/inet.c \
src/queue.h \
src/threadpool.c \
src/uv-common.c \
src/uv-common.h \
src/version.c
if SUNOS
libuv_la_CFLAGS += -pthread
endif
if WINNT
include_HEADERS += include/uv-win.h include/tree.h
AM_CPPFLAGS += -I$(top_srcdir)/src/win \
-DWIN32_LEAN_AND_MEAN \
-D_WIN32_WINNT=0x0600
LIBS += -lws2_32 -lpsapi -liphlpapi -lshell32
libuv_la_SOURCES += src/win/async.c \
src/win/atomicops-inl.h \
src/win/core.c \
src/win/dl.c \
src/win/error.c \
src/win/fs-event.c \
src/win/fs.c \
src/win/getaddrinfo.c \
src/win/getnameinfo.c \
src/win/handle.c \
src/win/handle-inl.h \
src/win/internal.h \
src/win/loop-watcher.c \
src/win/pipe.c \
src/win/poll.c \
src/win/process-stdio.c \
src/win/process.c \
src/win/req.c \
src/win/req-inl.h \
src/win/signal.c \
src/win/stream.c \
src/win/stream-inl.h \
src/win/tcp.c \
src/win/thread.c \
src/win/timer.c \
src/win/tty.c \
src/win/udp.c \
src/win/util.c \
src/win/winapi.c \
src/win/winapi.h \
src/win/winsock.c \
src/win/winsock.h
else # WINNT
include_HEADERS += include/uv-unix.h
AM_CPPFLAGS += -I$(top_srcdir)/src/unix
libuv_la_SOURCES += src/unix/async.c \
src/unix/atomic-ops.h \
src/unix/core.c \
src/unix/dl.c \
src/unix/fs.c \
src/unix/getaddrinfo.c \
src/unix/getnameinfo.c \
src/unix/internal.h \
src/unix/loop-watcher.c \
src/unix/loop.c \
src/unix/pipe.c \
src/unix/poll.c \
src/unix/process.c \
src/unix/signal.c \
src/unix/spinlock.h \
src/unix/stream.c \
src/unix/tcp.c \
src/unix/thread.c \
src/unix/timer.c \
src/unix/tty.c \
src/unix/udp.c
endif # WINNT
TESTS = test/run-tests
check_PROGRAMS = test/run-tests
test_run_tests_CFLAGS =
test_run_tests_SOURCES = test/blackhole-server.c \
test/dns-server.c \
test/echo-server.c \
test/run-tests.c \
test/runner.c \
test/runner.h \
test/task.h \
test/test-active.c \
test/test-async.c \
test/test-async-null-cb.c \
test/test-barrier.c \
test/test-callback-order.c \
test/test-callback-stack.c \
test/test-close-fd.c \
test/test-close-order.c \
test/test-condvar.c \
test/test-connection-fail.c \
test/test-cwd-and-chdir.c \
test/test-delayed-accept.c \
test/test-dlerror.c \
test/test-embed.c \
test/test-emfile.c \
test/test-error.c \
test/test-fail-always.c \
test/test-fs-event.c \
test/test-fs-poll.c \
test/test-fs.c \
test/test-get-currentexe.c \
test/test-get-loadavg.c \
test/test-get-memory.c \
test/test-getaddrinfo.c \
test/test-getnameinfo.c \
test/test-getsockname.c \
test/test-hrtime.c \
test/test-idle.c \
test/test-ip4-addr.c \
test/test-ip6-addr.c \
test/test-ipc-send-recv.c \
test/test-ipc.c \
test/test-list.h \
test/test-loop-handles.c \
test/test-loop-alive.c \
test/test-loop-close.c \
test/test-loop-stop.c \
test/test-loop-time.c \
test/test-multiple-listen.c \
test/test-mutexes.c \
test/test-osx-select.c \
test/test-pass-always.c \
test/test-ping-pong.c \
test/test-pipe-bind-error.c \
test/test-pipe-connect-error.c \
test/test-pipe-getsockname.c \
test/test-pipe-sendmsg.c \
test/test-pipe-server-close.c \
test/test-platform-output.c \
test/test-poll-close.c \
test/test-poll-closesocket.c \
test/test-poll.c \
test/test-process-title.c \
test/test-ref.c \
test/test-run-nowait.c \
test/test-run-once.c \
test/test-semaphore.c \
test/test-shutdown-close.c \
test/test-shutdown-eof.c \
test/test-shutdown-twice.c \
test/test-signal-multiple-loops.c \
test/test-signal.c \
test/test-socket-buffer-size.c \
test/test-spawn.c \
test/test-stdio-over-pipes.c \
test/test-tcp-bind-error.c \
test/test-tcp-bind6-error.c \
test/test-tcp-close-accept.c \
test/test-tcp-close-while-connecting.c \
test/test-tcp-close.c \
test/test-tcp-connect-error-after-write.c \
test/test-tcp-connect-error.c \
test/test-tcp-connect-timeout.c \
test/test-tcp-connect6-error.c \
test/test-tcp-flags.c \
test/test-tcp-open.c \
test/test-tcp-read-stop.c \
test/test-tcp-shutdown-after-write.c \
test/test-tcp-unexpected-read.c \
test/test-tcp-write-to-half-open-connection.c \
test/test-tcp-writealot.c \
test/test-tcp-try-write.c \
test/test-tcp-write-queue-order.c \
test/test-thread.c \
test/test-threadpool-cancel.c \
test/test-threadpool.c \
test/test-timer-again.c \
test/test-timer-from-check.c \
test/test-timer.c \
test/test-tty.c \
test/test-udp-bind.c \
test/test-udp-dgram-too-big.c \
test/test-udp-ipv6.c \
test/test-udp-multicast-interface.c \
test/test-udp-multicast-interface6.c \
test/test-udp-multicast-join.c \
test/test-udp-multicast-join6.c \
test/test-udp-multicast-ttl.c \
test/test-udp-open.c \
test/test-udp-options.c \
test/test-udp-send-and-recv.c \
test/test-udp-send-immediate.c \
test/test-udp-try-send.c \
test/test-walk-handles.c \
test/test-watcher-cross-stop.c
test_run_tests_LDADD = libuv.la
if WINNT
test_run_tests_SOURCES += test/runner-win.c \
test/runner-win.h
else
test_run_tests_SOURCES += test/runner-unix.c \
test/runner-unix.h
endif
if AIX
test_run_tests_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT
endif
if SUNOS
test_run_tests_CFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500
endif
if AIX
libuv_la_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT
libuv_la_SOURCES += src/unix/aix.c
endif
if ANDROID
include_HEADERS += include/android-ifaddrs.h \
include/pthread-fixes.h
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
src/unix/pthread-fixes.c
endif
if DARWIN
include_HEADERS += include/uv-darwin.h
libuv_la_CFLAGS += -D_DARWIN_USE_64_BIT_INODE=1
libuv_la_SOURCES += src/unix/darwin.c \
src/unix/darwin-proctitle.c \
src/unix/fsevents.c \
src/unix/kqueue.c \
src/unix/proctitle.c
endif
if FREEBSD
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/freebsd.c src/unix/kqueue.c
endif
if LINUX
include_HEADERS += include/uv-linux.h
libuv_la_SOURCES += src/unix/linux-core.c \
src/unix/linux-inotify.c \
src/unix/linux-syscalls.c \
src/unix/linux-syscalls.h \
src/unix/proctitle.c
endif
if NETBSD
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/kqueue.c src/unix/netbsd.c
endif
if OPENBSD
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/kqueue.c src/unix/openbsd.c
endif
if SUNOS
include_HEADERS += include/uv-sunos.h
libuv_la_CFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500
libuv_la_SOURCES += src/unix/sunos.c
endif
if HAVE_DTRACE
BUILT_SOURCES = include/uv-dtrace.h
CLEANFILES += include/uv-dtrace.h
if FREEBSD
libuv_la_LDFLAGS += -lelf
endif
endif
if DTRACE_NEEDS_OBJECTS
libuv_la_SOURCES += src/unix/uv-dtrace.d
libuv_la_DEPENDENCIES = src/unix/uv-dtrace.o
libuv_la_LIBADD = uv-dtrace.lo
CLEANFILES += src/unix/uv-dtrace.o src/unix/uv-dtrace.lo
endif
if HAVE_PKG_CONFIG
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = @PACKAGE_NAME@.pc
endif
if HAVE_DTRACE
include/uv-dtrace.h: src/unix/uv-dtrace.d
$(AM_V_GEN)$(DTRACE) $(DTRACEFLAGS) -h -xnolibs -s $< -o $(top_srcdir)/$@
endif
if DTRACE_NEEDS_OBJECTS
SUFFIXES = .d
src/unix/uv-dtrace.o: src/unix/uv-dtrace.d ${libuv_la_OBJECTS}
# It's ok to specify the output here, because we have 1 .d file, and we process
# every created .o, most projects don't need to include more than one .d
.d.o:
$(AM_V_GEN)$(DTRACE) $(DTRACEFLAGS) -G -o $(top_builddir)/uv-dtrace.o -s $< \
`find ${top_builddir}/src -name "*.o"`
$(AM_V_GEN)printf %s\\n \
'# ${top_builddir}/uv-dtrace.lo - a libtool object file' \
'# Generated by libtool (GNU libtool) 2.4' \
'# libtool wants a .lo not a .o' \
"pic_object='uv-dtrace.o'" \
"non_pic_object='uv-dtrace.o'" \
> ${top_builddir}/uv-dtrace.lo
endif

@@ -0,0 +1,84 @@
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
CC ?= gcc
CFLAGS += -Wall \
-Wextra \
-Wno-unused-parameter \
-Iinclude \
-Isrc \
-Isrc/win \
-DWIN32_LEAN_AND_MEAN \
-D_WIN32_WINNT=0x0600
INCLUDES = include/stdint-msvc2008.h \
include/tree.h \
include/uv-errno.h \
include/uv-threadpool.h \
include/uv-version.h \
include/uv-win.h \
include/uv.h \
src/heap-inl.h \
src/queue.h \
src/uv-common.h \
src/win/atomicops-inl.h \
src/win/handle-inl.h \
src/win/internal.h \
src/win/req-inl.h \
src/win/stream-inl.h \
src/win/winapi.h \
src/win/winsock.h
OBJS = src/fs-poll.o \
src/inet.o \
src/threadpool.o \
src/uv-common.o \
src/version.o \
src/win/async.o \
src/win/core.o \
src/win/dl.o \
src/win/error.o \
src/win/fs-event.o \
src/win/fs.o \
src/win/getaddrinfo.o \
src/win/getnameinfo.o \
src/win/handle.o \
src/win/loop-watcher.o \
src/win/pipe.o \
src/win/poll.o \
src/win/process-stdio.o \
src/win/process.o \
src/win/req.o \
src/win/signal.o \
src/win/stream.o \
src/win/tcp.o \
src/win/thread.o \
src/win/timer.o \
src/win/tty.o \
src/win/udp.o \
src/win/util.o \
src/win/winapi.o \
src/win/winsock.o
all: libuv.a
clean:
-$(RM) $(OBJS) libuv.a
libuv.a: $(OBJS)
$(AR) crs $@ $^
$(OBJS): %.o : %.c $(INCLUDES)
$(CC) $(CFLAGS) -c -o $@ $<

146
outside/libuv_11/README.md Normal file
@@ -0,0 +1,146 @@
![libuv][libuv_banner]
## Overview
libuv is a multi-platform support library with a focus on asynchronous I/O. It
was primarily developed for use by [Node.js](http://nodejs.org), but it's also
used by Mozilla's [Rust language](http://www.rust-lang.org/),
[Luvit](http://luvit.io/), [Julia](http://julialang.org/),
[pyuv](https://github.com/saghul/pyuv), and [others](https://github.com/joyent/libuv/wiki/Projects-that-use-libuv).
## Feature highlights
* Full-featured event loop backed by epoll, kqueue, IOCP, event ports.
* Asynchronous TCP and UDP sockets
* Asynchronous DNS resolution
* Asynchronous file and file system operations
* File system events
* ANSI escape code controlled TTY
* IPC with socket sharing, using Unix domain sockets or named pipes (Windows)
* Child processes
* Thread pool
* Signal handling
* High resolution clock
* Threading and synchronization primitives
## Community
* [Mailing list](http://groups.google.com/group/libuv)
## Documentation
* [include/uv.h](https://github.com/joyent/libuv/blob/master/include/uv.h)
&mdash; API documentation in the form of detailed header comments (a minimal usage sketch follows this list).
* [An Introduction to libuv](http://nikhilm.github.com/uvbook/)
&mdash; An overview of libuv with tutorials.
* [LXJS 2012 talk](http://www.youtube.com/watch?v=nGn60vDSxQ4)
&mdash; High-level introductory talk about libuv.
* [Tests and benchmarks](https://github.com/joyent/libuv/tree/master/test)
&mdash; API specification and usage examples.
* [libuv-dox](https://github.com/thlorenz/libuv-dox)
&mdash; Documenting types and methods of libuv, mostly by reading uv.h.
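For a first taste of the API documented in `uv.h`, a minimal program might look
like this (an illustrative sketch; compiler and linker flags depend on how libuv
was built and installed):
```c
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();  /* the process-wide default loop */

  printf("running libuv %s\n", uv_version_string());

  /* Runs until no active handles or requests remain; with nothing
     registered it returns immediately. */
  uv_run(loop, UV_RUN_DEFAULT);
  return 0;
}
```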
## Build Instructions
For GCC there are two build methods: via autotools or via [GYP][].
GYP is a meta-build system which can generate MSVS, Makefile, and XCode
backends. It is best used for integration into other projects.
To build with autotools:
$ sh autogen.sh
$ ./configure
$ make
$ make check
$ make install
### Windows
First, [Python][] 2.6 or 2.7 must be installed as it is required by [GYP][].
If python is not in your path set the environment variable `PYTHON` to its
location. For example: `set PYTHON=C:\Python27\python.exe`
To build with Visual Studio, launch a git shell (e.g. Cmd or PowerShell)
and run vcbuild.bat which will checkout the GYP code into build/gyp and
generate uv.sln as well as related project files.
To have GYP generate a build script for another system, check out GYP into the
project tree manually:
$ mkdir -p build
$ git clone https://git.chromium.org/external/gyp.git build/gyp
### Unix
Run:
$ ./gyp_uv.py -f make
$ make -C out
### OS X
Run:
$ ./gyp_uv.py -f xcode
$ xcodebuild -ARCHS="x86_64" -project uv.xcodeproj \
-configuration Release -target All
Note to OS X users:
Make sure that you specify the architecture you wish to build for in the
"ARCHS" flag. You can specify more than one by delimiting with a space
(e.g. "x86_64 i386").
### Android
Run:
$ source ./android-configure NDK_PATH gyp
$ make -C out
Note for UNIX users: compile your project with `-D_LARGEFILE_SOURCE` and
`-D_FILE_OFFSET_BITS=64`. GYP builds take care of that automatically.
### Running tests
Run:
$ ./gyp_uv.py -f make
$ make -C out
$ ./out/Debug/run-tests
## Supported Platforms
Microsoft Windows operating systems since Windows XP SP2. It can be built
with either Visual Studio or MinGW. Consider using
[Visual Studio Express 2010][] or later if you do not have a full Visual
Studio license.
Linux using the GCC toolchain.
OS X using the GCC or XCode toolchain.
Solaris 121 and later using the GCC toolchain.
## Patches
See the [guidelines for contributing][].
[node.js]: http://nodejs.org/
[GYP]: http://code.google.com/p/gyp/
[Python]: https://www.python.org/downloads/
[Visual Studio Express 2010]: http://www.microsoft.com/visualstudio/eng/products/visual-studio-2010-express
[guidelines for contributing]: https://github.com/joyent/libuv/blob/master/CONTRIBUTING.md
[libuv_banner]: https://raw.githubusercontent.com/joyent/libuv/master/img/banner.png

@@ -0,0 +1,20 @@
#!/bin/bash
export TOOLCHAIN=$PWD/android-toolchain
mkdir -p $TOOLCHAIN
$1/build/tools/make-standalone-toolchain.sh \
--toolchain=arm-linux-androideabi-4.8 \
--arch=arm \
--install-dir=$TOOLCHAIN \
--platform=android-9
export PATH=$TOOLCHAIN/bin:$PATH
export AR=arm-linux-androideabi-ar
export CC=arm-linux-androideabi-gcc
export CXX=arm-linux-androideabi-g++
export LINK=arm-linux-androideabi-g++
export PLATFORM=android
if [ $2 -a $2 == 'gyp' ]
then
./gyp_uv.py -Dtarget_arch=arm -DOS=android
fi

46
outside/libuv_11/autogen.sh Executable file
@@ -0,0 +1,46 @@
#!/bin/sh
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
cd `dirname "$0"`
if [ "$LIBTOOLIZE" = "" ] && [ "`uname`" = "Darwin" ]; then
LIBTOOLIZE=glibtoolize
fi
ACLOCAL=${ACLOCAL:-aclocal}
AUTOCONF=${AUTOCONF:-autoconf}
AUTOMAKE=${AUTOMAKE:-automake}
LIBTOOLIZE=${LIBTOOLIZE:-libtoolize}
automake_version=`"$AUTOMAKE" --version | head -n 1 | sed 's/[^.0-9]//g'`
automake_version_major=`echo "$automake_version" | cut -d. -f1`
automake_version_minor=`echo "$automake_version" | cut -d. -f2`
UV_EXTRA_AUTOMAKE_FLAGS=
if test "$automake_version_major" -gt 1 || \
test "$automake_version_major" -eq 1 && \
test "$automake_version_minor" -gt 11; then
# serial-tests is available in v1.12 and newer.
UV_EXTRA_AUTOMAKE_FLAGS="$UV_EXTRA_AUTOMAKE_FLAGS serial-tests"
fi
echo "m4_define([UV_EXTRA_AUTOMAKE_FLAGS], [$UV_EXTRA_AUTOMAKE_FLAGS])" \
> m4/libuv-extra-automake-flags.m4
set -ex
"$LIBTOOLIZE"
"$ACLOCAL" -I m4
"$AUTOCONF"
"$AUTOMAKE" --add-missing --copy

232
outside/libuv_11/checksparse.sh Executable file
@@ -0,0 +1,232 @@
#!/bin/sh
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
SPARSE=${SPARSE:-sparse}
SPARSE_FLAGS=${SPARSE_FLAGS:-"
-D__POSIX__
-Wsparse-all
-Wno-do-while
-Wno-transparent-union
-Iinclude
-Isrc
"}
SOURCES="
include/tree.h
include/uv-unix.h
include/uv.h
src/fs-poll.c
src/inet.c
src/queue.h
src/unix/async.c
src/unix/core.c
src/unix/dl.c
src/unix/fs.c
src/unix/getaddrinfo.c
src/unix/internal.h
src/unix/loop-watcher.c
src/unix/loop.c
src/unix/pipe.c
src/unix/poll.c
src/unix/process.c
src/unix/signal.c
src/unix/stream.c
src/unix/tcp.c
src/unix/thread.c
src/unix/threadpool.c
src/unix/timer.c
src/unix/tty.c
src/unix/udp.c
src/uv-common.c
src/uv-common.h
"
TESTS="
test/benchmark-async-pummel.c
test/benchmark-async.c
test/benchmark-fs-stat.c
test/benchmark-getaddrinfo.c
test/benchmark-loop-count.c
test/benchmark-million-async.c
test/benchmark-million-timers.c
test/benchmark-multi-accept.c
test/benchmark-ping-pongs.c
test/benchmark-pound.c
test/benchmark-pump.c
test/benchmark-sizes.c
test/benchmark-spawn.c
test/benchmark-tcp-write-batch.c
test/benchmark-thread.c
test/benchmark-udp-pummel.c
test/blackhole-server.c
test/dns-server.c
test/echo-server.c
test/run-benchmarks.c
test/run-tests.c
test/runner-unix.c
test/runner-unix.h
test/runner.c
test/runner.h
test/task.h
test/test-active.c
test/test-async.c
test/test-barrier.c
test/test-callback-order.c
test/test-callback-stack.c
test/test-condvar.c
test/test-connection-fail.c
test/test-cwd-and-chdir.c
test/test-delayed-accept.c
test/test-dlerror.c
test/test-embed.c
test/test-error.c
test/test-fail-always.c
test/test-fs-event.c
test/test-fs-poll.c
test/test-fs.c
test/test-get-currentexe.c
test/test-get-loadavg.c
test/test-get-memory.c
test/test-getaddrinfo.c
test/test-getsockname.c
test/test-hrtime.c
test/test-idle.c
test/test-ip6-addr.c
test/test-ipc-send-recv.c
test/test-ipc.c
test/test-loop-handles.c
test/test-multiple-listen.c
test/test-mutexes.c
test/test-pass-always.c
test/test-ping-pong.c
test/test-pipe-bind-error.c
test/test-pipe-connect-error.c
test/test-pipe-sendmsg.c
test/test-pipe-server-close.c
test/test-platform-output.c
test/test-poll-close.c
test/test-poll.c
test/test-process-title.c
test/test-ref.c
test/test-run-nowait.c
test/test-run-once.c
test/test-semaphore.c
test/test-shutdown-close.c
test/test-shutdown-eof.c
test/test-signal-multiple-loops.c
test/test-signal.c
test/test-spawn.c
test/test-stdio-over-pipes.c
test/test-tcp-bind-error.c
test/test-tcp-bind6-error.c
test/test-tcp-close-while-connecting.c
test/test-tcp-close-accept.c
test/test-tcp-close.c
test/test-tcp-connect-error-after-write.c
test/test-tcp-connect-error.c
test/test-tcp-connect-timeout.c
test/test-tcp-connect6-error.c
test/test-tcp-flags.c
test/test-tcp-open.c
test/test-tcp-read-stop.c
test/test-tcp-shutdown-after-write.c
test/test-tcp-unexpected-read.c
test/test-tcp-write-error.c
test/test-tcp-write-to-half-open-connection.c
test/test-tcp-writealot.c
test/test-thread.c
test/test-threadpool-cancel.c
test/test-threadpool.c
test/test-timer-again.c
test/test-timer.c
test/test-tty.c
test/test-udp-dgram-too-big.c
test/test-udp-ipv6.c
test/test-udp-multicast-join.c
test/test-udp-multicast-ttl.c
test/test-udp-open.c
test/test-udp-options.c
test/test-udp-send-and-recv.c
test/test-walk-handles.c
test/test-watcher-cross-stop.c
"
case `uname -s` in
AIX)
SPARSE_FLAGS="$SPARSE_FLAGS -D_AIX=1"
SOURCES="$SOURCES
src/unix/aix.c"
;;
Darwin)
SPARSE_FLAGS="$SPARSE_FLAGS -D__APPLE__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/darwin.c
src/unix/kqueue.c
src/unix/fsevents.c"
;;
DragonFly)
SPARSE_FLAGS="$SPARSE_FLAGS -D__DragonFly__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/freebsd.c"
;;
FreeBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__FreeBSD__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/freebsd.c"
;;
Linux)
SPARSE_FLAGS="$SPARSE_FLAGS -D__linux__=1"
SOURCES="$SOURCES
include/uv-linux.h
src/unix/linux-inotify.c
src/unix/linux-core.c
src/unix/linux-syscalls.c
src/unix/linux-syscalls.h"
;;
NetBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__NetBSD__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/netbsd.c"
;;
OpenBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__OpenBSD__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/openbsd.c"
;;
SunOS)
SPARSE_FLAGS="$SPARSE_FLAGS -D__sun=1"
SOURCES="$SOURCES
include/uv-sunos.h
src/unix/sunos.c"
;;
esac
for ARCH in __i386__ __x86_64__ __arm__ __mips__; do
$SPARSE $SPARSE_FLAGS -D$ARCH=1 $SOURCES
done
# Tests are architecture independent.
$SPARSE $SPARSE_FLAGS -Itest $TESTS

@@ -0,0 +1,208 @@
{
'variables': {
'visibility%': 'hidden', # V8's visibility setting
'target_arch%': 'ia32', # set v8's target architecture
'host_arch%': 'ia32', # set v8's host architecture
'uv_library%': 'static_library', # allow override to 'shared_library' for DLL/.so builds
'component%': 'static_library', # NB. these names match with what V8 expects
'msvs_multi_core_compile': '0', # we do enable multicore compiles, but not using the V8 way
'gcc_version%': 'unknown',
'clang%': 0,
},
'target_defaults': {
'default_configuration': 'Debug',
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'cflags': [ '-g', '-O0', '-fwrapv' ],
'msvs_settings': {
'VCCLCompilerTool': {
'target_conditions': [
['uv_library=="static_library"', {
'RuntimeLibrary': 1, # static debug
}, {
'RuntimeLibrary': 3, # DLL debug
}],
],
'Optimization': 0, # /Od, no optimization
'MinimalRebuild': 'false',
'OmitFramePointers': 'false',
'BasicRuntimeChecks': 3, # /RTC1
},
'VCLinkerTool': {
'LinkIncremental': 2, # enable incremental linking
},
},
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '0',
'OTHER_CFLAGS': [ '-Wno-strict-aliasing' ],
},
'conditions': [
['OS != "win"', {
'defines': [ 'EV_VERIFY=2' ],
}],
]
},
'Release': {
'defines': [ 'NDEBUG' ],
'cflags': [
'-O3',
'-fstrict-aliasing',
'-fomit-frame-pointer',
'-fdata-sections',
'-ffunction-sections',
],
'msvs_settings': {
'VCCLCompilerTool': {
'target_conditions': [
['uv_library=="static_library"', {
'RuntimeLibrary': 0, # static release
}, {
'RuntimeLibrary': 2, # DLL release
}],
],
'Optimization': 3, # /Ox, full optimization
'FavorSizeOrSpeed': 1, # /Ot, favour speed over size
'InlineFunctionExpansion': 2, # /Ob2, inline anything eligible
'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG
'OmitFramePointers': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
},
'VCLibrarianTool': {
'AdditionalOptions': [
'/LTCG', # link time code generation
],
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': 1, # link-time code generation
'OptimizeReferences': 2, # /OPT:REF
'EnableCOMDATFolding': 2, # /OPT:ICF
'LinkIncremental': 1, # disable incremental linking
},
},
}
},
'msvs_settings': {
'VCCLCompilerTool': {
'StringPooling': 'true', # pool string literals
'DebugInformationFormat': 3, # Generate a PDB
'WarningLevel': 3,
'BufferSecurityCheck': 'true',
'ExceptionHandling': 1, # /EHsc
'SuppressStartupBanner': 'true',
'WarnAsError': 'false',
'AdditionalOptions': [
'/MP', # compile across multiple CPUs
],
},
'VCLibrarianTool': {
},
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'RandomizedBaseAddress': 2, # enable ASLR
'DataExecutionPrevention': 2, # enable DEP
'AllowIsolation': 'true',
'SuppressStartupBanner': 'true',
'target_conditions': [
['_type=="executable"', {
'SubSystem': 1, # console executable
}],
],
},
},
'conditions': [
['OS == "win"', {
'msvs_cygwin_shell': 0, # prevent actions from trying to use cygwin
'defines': [
'WIN32',
# we don't really want VC++ warning us about
# how dangerous C functions are...
'_CRT_SECURE_NO_DEPRECATE',
# ... or that C implementations shouldn't use
# POSIX names
'_CRT_NONSTDC_NO_DEPRECATE',
],
'target_conditions': [
['target_arch=="x64"', {
'msvs_configuration_platform': 'x64'
}]
]
}],
['OS in "freebsd linux openbsd solaris android"', {
'cflags': [ '-Wall' ],
'cflags_cc': [ '-fno-rtti', '-fno-exceptions' ],
'target_conditions': [
['_type=="static_library"', {
'standalone_static_library': 1, # disable thin archive which needs binutils >= 2.19
}],
],
'conditions': [
[ 'host_arch != target_arch and target_arch=="ia32"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
[ 'OS=="linux"', {
'cflags': [ '-ansi' ],
}],
[ 'OS=="solaris"', {
'cflags': [ '-pthreads' ],
'ldflags': [ '-pthreads' ],
}],
[ 'OS not in "solaris android"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
}],
[ 'visibility=="hidden" and (clang==1 or gcc_version >= 40)', {
'cflags': [ '-fvisibility=hidden' ],
}],
],
}],
['OS=="mac"', {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks
'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic
# (Equivalent to -fPIC)
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO', # -fno-exceptions
'GCC_ENABLE_CPP_RTTI': 'NO', # -fno-rtti
'GCC_ENABLE_PASCAL_STRINGS': 'NO', # No -mpascal-strings
# GCC_INLINES_ARE_PRIVATE_EXTERN maps to -fvisibility-inlines-hidden
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'PREBINDING': 'NO', # No -Wl,-prebind
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fstrict-aliasing',
],
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
],
},
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {'ARCHS': ['i386']},
}],
['target_arch=="x64"', {
'xcode_settings': {'ARCHS': ['x86_64']},
}],
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
}],
],
}],
['OS=="solaris"', {
'cflags': [ '-fno-omit-frame-pointer' ],
# pull in V8's postmortem metadata
'ldflags': [ '-Wl,-z,allextract' ]
}],
],
},
}

@@ -0,0 +1,57 @@
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [0.11.29], [https://github.com/joyent/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects] UV_EXTRA_AUTOMAKE_FLAGS)
AC_CANONICAL_HOST
AC_ENABLE_SHARED
AC_ENABLE_STATIC
AC_PROG_CC
AM_PROG_CC_C_O
# AM_PROG_AR is not available in automake v1.11 but it's essential in v1.12.
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
LT_INIT
# TODO(bnoordhuis) Check for -pthread vs. -pthreads
AC_CHECK_LIB([dl], [dlopen])
AC_CHECK_LIB([kstat], [kstat_lookup])
AC_CHECK_LIB([kvm], [kvm_open])
AC_CHECK_LIB([nsl], [gethostbyname])
AC_CHECK_LIB([perfstat], [perfstat_cpu])
AC_CHECK_LIB([pthread], [pthread_mutex_init])
AC_CHECK_LIB([rt], [clock_gettime])
AC_CHECK_LIB([sendfile], [sendfile])
AC_CHECK_LIB([socket], [socket])
AC_SYS_LARGEFILE
AM_CONDITIONAL([AIX], [AS_CASE([$host_os],[aix*], [true], [false])])
AM_CONDITIONAL([ANDROID],[AS_CASE([$host_os],[linux-android*],[true], [false])])
AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])])
AM_CONDITIONAL([FREEBSD],[AS_CASE([$host_os],[freebsd*], [true], [false])])
AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])])
AM_CONDITIONAL([NETBSD], [AS_CASE([$host_os],[netbsd*], [true], [false])])
AM_CONDITIONAL([OPENBSD],[AS_CASE([$host_os],[openbsd*], [true], [false])])
AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])])
AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])])
PANDORA_ENABLE_DTRACE
AC_CHECK_PROG(PKG_CONFIG, pkg-config, yes)
AM_CONDITIONAL([HAVE_PKG_CONFIG], [test "x$PKG_CONFIG" != "x"])
AS_IF([test "x$PKG_CONFIG" != "x"], [
AC_CONFIG_FILES([libuv.pc])
])
AC_CONFIG_FILES([Makefile])
AC_OUTPUT

111
outside/libuv_11/gyp_uv.py Executable file
@@ -0,0 +1,111 @@
#!/usr/bin/env python
import glob
import platform
import os
import subprocess
import sys
try:
import multiprocessing.synchronize
gyp_parallel_support = True
except ImportError:
gyp_parallel_support = False
CC = os.environ.get('CC', 'cc')
script_dir = os.path.dirname(__file__)
uv_root = os.path.normpath(script_dir)
output_dir = os.path.join(os.path.abspath(uv_root), 'out')
sys.path.insert(0, os.path.join(uv_root, 'build', 'gyp', 'pylib'))
try:
import gyp
except ImportError:
print('You need to install gyp in build/gyp first. See the README.')
sys.exit(42)
def host_arch():
machine = platform.machine()
if machine == 'i386': return 'ia32'
if machine == 'x86_64': return 'x64'
if machine.startswith('arm'): return 'arm'
if machine.startswith('mips'): return 'mips'
return machine # Return as-is and hope for the best.
def compiler_version():
proc = subprocess.Popen(CC.split() + ['--version'], stdout=subprocess.PIPE)
is_clang = 'clang' in proc.communicate()[0].split('\n')[0]
proc = subprocess.Popen(CC.split() + ['-dumpversion'], stdout=subprocess.PIPE)
version = proc.communicate()[0].split('.')
version = map(int, version[:2])
version = tuple(version)
return (version, is_clang)
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
print 'Error running GYP'
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
# On msvs it will crash if it gets an absolute path.
# On Mac/make it will crash if it doesn't get an absolute path.
if sys.platform == 'win32':
args.append(os.path.join(uv_root, 'uv.gyp'))
common_fn = os.path.join(uv_root, 'common.gypi')
options_fn = os.path.join(uv_root, 'options.gypi')
# we force vs 2010 over 2008 which would otherwise be the default for gyp
if not os.environ.get('GYP_MSVS_VERSION'):
os.environ['GYP_MSVS_VERSION'] = '2010'
else:
args.append(os.path.join(os.path.abspath(uv_root), 'uv.gyp'))
common_fn = os.path.join(os.path.abspath(uv_root), 'common.gypi')
options_fn = os.path.join(os.path.abspath(uv_root), 'options.gypi')
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
if os.path.exists(options_fn):
args.extend(['-I', options_fn])
args.append('--depth=' + uv_root)
# There's a bug with windows which doesn't allow this feature.
if sys.platform != 'win32':
if '-f' not in args:
args.extend('-f make'.split())
if 'eclipse' not in args and 'ninja' not in args:
args.extend(['-Goutput_dir=' + output_dir])
args.extend(['--generator-output', output_dir])
(major, minor), is_clang = compiler_version()
args.append('-Dgcc_version=%d' % (10 * major + minor))
args.append('-Dclang=%d' % int(is_clang))
if not any(a.startswith('-Dhost_arch=') for a in args):
args.append('-Dhost_arch=%s' % host_arch())
if not any(a.startswith('-Dtarget_arch=') for a in args):
args.append('-Dtarget_arch=%s' % host_arch())
if not any(a.startswith('-Duv_library=') for a in args):
args.append('-Duv_library=static_library')
if not any(a.startswith('-Dcomponent=') for a in args):
args.append('-Dcomponent=static_library')
# Some platforms (OpenBSD for example) don't have multiprocessing.synchronize
# so gyp must be run with --no-parallel
if not gyp_parallel_support:
args.append('--no-parallel')
gyp_args = list(args)
print gyp_args
run_gyp(gyp_args)

Binary file not shown.

File diff suppressed because one or more lines are too long
@@ -0,0 +1,54 @@
/*
* Copyright (c) 1995, 1999
* Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
*/
#ifndef _IFADDRS_H_
#define _IFADDRS_H_
struct ifaddrs {
struct ifaddrs *ifa_next;
char *ifa_name;
unsigned int ifa_flags;
struct sockaddr *ifa_addr;
struct sockaddr *ifa_netmask;
struct sockaddr *ifa_dstaddr;
void *ifa_data;
};
/*
* This may have been defined in <net/if.h>. Note that if <net/if.h> is
* to be included it must be included before this header file.
*/
#ifndef ifa_broadaddr
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
#endif
#include <sys/cdefs.h>
__BEGIN_DECLS
extern int getifaddrs(struct ifaddrs **ifap);
extern void freeifaddrs(struct ifaddrs *ifa);
__END_DECLS
#endif
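For reference, this interface is used exactly like the standard BSD `getifaddrs()`;
a minimal sketch (a hypothetical standalone program, not part of this commit):
```c
#include <stdio.h>
#include <ifaddrs.h>  /* on Android builds, the shim declared above */

int main(void) {
  struct ifaddrs* addrs;
  struct ifaddrs* ent;

  if (getifaddrs(&addrs) != 0)
    return 1;
  /* Walk the singly-linked list of interfaces and print each name. */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next)
    printf("%s (flags 0x%x)\n", ent->ifa_name, ent->ifa_flags);
  freeifaddrs(addrs);
  return 0;
}
```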

@@ -0,0 +1,72 @@
/* Copyright (c) 2013, Sony Mobile Communications AB
* Copyright (c) 2012, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GOOGLE_BREAKPAD_COMMON_ANDROID_TESTING_PTHREAD_FIXES_H
#define GOOGLE_BREAKPAD_COMMON_ANDROID_TESTING_PTHREAD_FIXES_H
#include <pthread.h>
/*Android doesn't provide pthread_barrier_t for now.*/
#ifndef PTHREAD_BARRIER_SERIAL_THREAD
/* Anything except 0 will do here.*/
#define PTHREAD_BARRIER_SERIAL_THREAD 0x12345
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
unsigned count;
} pthread_barrier_t;
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count);
int pthread_barrier_wait(pthread_barrier_t* barrier);
int pthread_barrier_destroy(pthread_barrier_t *barrier);
#endif /* defined(PTHREAD_BARRIER_SERIAL_THREAD) */
int pthread_yield(void);
/* Workaround pthread_sigmask() returning EINVAL on versions < 4.1 by
* replacing all calls to pthread_sigmask with sigprocmask. See:
* https://android.googlesource.com/platform/bionic/+/9bf330b5
* https://code.google.com/p/android/issues/detail?id=15337
*/
int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
#ifdef pthread_sigmask
#undef pthread_sigmask
#endif
#define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
#endif /* GOOGLE_BREAKPAD_COMMON_ANDROID_TESTING_PTHREAD_FIXES_H */
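For context, the barrier shim above is used just like the POSIX API it stands in
for; a minimal sketch (a hypothetical example, assuming a pthreads-capable
toolchain):
```c
#include <pthread.h>

#define NUM_WORKERS 4

static pthread_barrier_t barrier;  /* the shim's type on Android, libc's elsewhere */

static void* worker(void* arg) {
  (void) arg;
  /* ...per-thread setup... */
  pthread_barrier_wait(&barrier);  /* all threads rendezvous here */
  /* ...work that requires every thread's setup to be finished... */
  return NULL;
}

int main(void) {
  pthread_t threads[NUM_WORKERS];
  int i;

  pthread_barrier_init(&barrier, NULL, NUM_WORKERS);
  for (i = 0; i < NUM_WORKERS; i++)
    pthread_create(&threads[i], NULL, worker, NULL);
  for (i = 0; i < NUM_WORKERS; i++)
    pthread_join(threads[i], NULL);
  pthread_barrier_destroy(&barrier);
  return 0;
}
```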

@@ -0,0 +1,247 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 don't
// realize that, e.g., char has the same size as __int8,
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
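
This MSVC stdint shim (uv-win.h below pulls it in as "stdint-msvc2008.h" for compilers older than Visual Studio 2010) provides the exact-width typedefs, limit macros, and INTn_C()/UINTn_C() constant macros of C99 section 7.18. A small sketch of those pieces; on any other compiler the standard <stdint.h> is used instead, and the printed values are illustrative only.

/* Sketch only: the shim above (or the real <stdint.h>) provides everything
 * referenced here: exact-width types, limit macros and constant macros. */
#include <stdio.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
#else
# include <stdint.h>
#endif

int main(void) {
  uint64_t big = UINT64_C(0x1122334455667788);   /* 7.18.4.1 constant macro */

  printf("low 32 bits: %lx, sizeof(intptr_t): %u, INT32_MAX: %ld\n",
         (unsigned long) (big & UINT32_MAX),     /* 7.18.2.1 limit macro    */
         (unsigned) sizeof(intptr_t),            /* 7.18.1.4 pointer-sized  */
         (long) INT32_MAX);
  return 0;
}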

View File

@ -0,0 +1,768 @@
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UV_TREE_H_
#define UV_TREE_H_
#ifndef UV__UNUSED
# if __GNUC__
# define UV__UNUSED __attribute__((unused))
# else
# define UV__UNUSED
# endif
#endif
/*
* This file defines data structures for different types of trees:
* splay trees and red-black trees.
*
* A splay tree is a self-organizing data structure. Every operation
* on the tree causes a splay to happen. The splay moves the requested
* node to the root of the tree and partly rebalances it.
*
* This has the benefit that request locality causes faster lookups as
* the requested nodes move to the top of the tree. On the other hand,
* every lookup causes memory writes.
*
* The Balance Theorem bounds the total access time for m operations
* and n inserts on an initially empty tree as O((m + n)lg n). The
* amortized cost for a sequence of m accesses to a splay tree is O(lg n);
*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*/
#define SPLAY_HEAD(name, type) \
struct name { \
struct type *sph_root; /* root of the tree */ \
}
#define SPLAY_INITIALIZER(root) \
{ NULL }
#define SPLAY_INIT(root) do { \
(root)->sph_root = NULL; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ENTRY(type) \
struct { \
struct type *spe_left; /* left element */ \
struct type *spe_right; /* right element */ \
}
#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
#define SPLAY_ROOT(head) (head)->sph_root
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
(head)->sph_root = tmp; \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKLEFT(head, tmp, field) do { \
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_LINKRIGHT(head, tmp, field) do { \
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
tmp = (head)->sph_root; \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} while (/*CONSTCOND*/ 0)
#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
void name##_SPLAY(struct name *, struct type *); \
void name##_SPLAY_MINMAX(struct name *, int); \
struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
\
/* Finds the node with the same key as elm */ \
static __inline struct type * \
name##_SPLAY_FIND(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) \
return(NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) \
return (head->sph_root); \
return (NULL); \
} \
\
static __inline struct type * \
name##_SPLAY_NEXT(struct name *head, struct type *elm) \
{ \
name##_SPLAY(head, elm); \
if (SPLAY_RIGHT(elm, field) != NULL) { \
elm = SPLAY_RIGHT(elm, field); \
while (SPLAY_LEFT(elm, field) != NULL) { \
elm = SPLAY_LEFT(elm, field); \
} \
} else \
elm = NULL; \
return (elm); \
} \
\
static __inline struct type * \
name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
return (SPLAY_ROOT(head)); \
}
/* Main splay operation.
* Moves node close to the key of elm to top
*/
#define SPLAY_GENERATE(name, type, field, cmp) \
struct type * \
name##_SPLAY_INSERT(struct name *head, struct type *elm) \
{ \
if (SPLAY_EMPTY(head)) { \
SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
} else { \
int __comp; \
name##_SPLAY(head, elm); \
__comp = (cmp)(elm, (head)->sph_root); \
if(__comp < 0) { \
SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
SPLAY_RIGHT(elm, field) = (head)->sph_root; \
SPLAY_LEFT((head)->sph_root, field) = NULL; \
} else if (__comp > 0) { \
SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
SPLAY_LEFT(elm, field) = (head)->sph_root; \
SPLAY_RIGHT((head)->sph_root, field) = NULL; \
} else \
return ((head)->sph_root); \
} \
(head)->sph_root = (elm); \
return (NULL); \
} \
\
struct type * \
name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *__tmp; \
if (SPLAY_EMPTY(head)) \
return (NULL); \
name##_SPLAY(head, elm); \
if ((cmp)(elm, (head)->sph_root) == 0) { \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
} else { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
name##_SPLAY(head, elm); \
SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
} \
return (elm); \
} \
return (NULL); \
} \
\
void \
name##_SPLAY(struct name *head, struct type *elm) \
{ \
struct type __node, *__left, *__right, *__tmp; \
int __comp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
__left = __right = &__node; \
\
while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if ((cmp)(elm, __tmp) > 0){ \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
} \
\
/* Splay with either the minimum or the maximum element \
* Used to find minimum or maximum element in tree. \
*/ \
void name##_SPLAY_MINMAX(struct name *head, int __comp) \
{ \
struct type __node, *__left, *__right, *__tmp; \
\
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
__left = __right = &__node; \
\
while (1) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp < 0){ \
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKLEFT(head, __right, field); \
} else if (__comp > 0) { \
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
if (__tmp == NULL) \
break; \
if (__comp > 0) { \
SPLAY_ROTATE_LEFT(head, __tmp, field); \
if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
break; \
} \
SPLAY_LINKRIGHT(head, __left, field); \
} \
} \
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
}
#define SPLAY_NEGINF -1
#define SPLAY_INF 1
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
: name##_SPLAY_MIN_MAX(x, SPLAY_INF))
#define SPLAY_FOREACH(x, name, head) \
for ((x) = SPLAY_MIN(name, head); \
(x) != NULL; \
(x) = SPLAY_NEXT(name, head, x))
/* Macros that define a red-black tree */
#define RB_HEAD(name, type) \
struct name { \
struct type *rbh_root; /* root of the tree */ \
}
#define RB_INITIALIZER(root) \
{ NULL }
#define RB_INIT(root) do { \
(root)->rbh_root = NULL; \
} while (/*CONSTCOND*/ 0)
#define RB_BLACK 0
#define RB_RED 1
#define RB_ENTRY(type) \
struct { \
struct type *rbe_left; /* left element */ \
struct type *rbe_right; /* right element */ \
struct type *rbe_parent; /* parent element */ \
int rbe_color; /* node color */ \
}
#define RB_LEFT(elm, field) (elm)->field.rbe_left
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
#define RB_COLOR(elm, field) (elm)->field.rbe_color
#define RB_ROOT(head) (head)->rbh_root
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
#define RB_SET(elm, parent, field) do { \
RB_PARENT(elm, field) = parent; \
RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
RB_COLOR(elm, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
#define RB_SET_BLACKRED(black, red, field) do { \
RB_COLOR(black, field) = RB_BLACK; \
RB_COLOR(red, field) = RB_RED; \
} while (/*CONSTCOND*/ 0)
#ifndef RB_AUGMENT
#define RB_AUGMENT(x) do {} while (0)
#endif
#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
(tmp) = RB_RIGHT(elm, field); \
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_LEFT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
(tmp) = RB_LEFT(elm, field); \
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
} \
RB_AUGMENT(elm); \
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
else \
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
} else \
(head)->rbh_root = (tmp); \
RB_RIGHT(tmp, field) = (elm); \
RB_PARENT(elm, field) = (tmp); \
RB_AUGMENT(tmp); \
if ((RB_PARENT(tmp, field))) \
RB_AUGMENT(RB_PARENT(tmp, field)); \
} while (/*CONSTCOND*/ 0)
/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
RB_PROTOTYPE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
attr struct type *name##_RB_INSERT(struct name *, struct type *); \
attr struct type *name##_RB_FIND(struct name *, struct type *); \
attr struct type *name##_RB_NFIND(struct name *, struct type *); \
attr struct type *name##_RB_NEXT(struct type *); \
attr struct type *name##_RB_PREV(struct type *); \
attr struct type *name##_RB_MINMAX(struct name *, int); \
                                                                              \

/* Main rb operation.
* Moves node close to the key of elm to top
*/
#define RB_GENERATE(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp,)
#define RB_GENERATE_STATIC(name, type, field, cmp) \
RB_GENERATE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
attr void \
name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
struct type *parent, *gparent, *tmp; \
while ((parent = RB_PARENT(elm, field)) != NULL && \
RB_COLOR(parent, field) == RB_RED) { \
gparent = RB_PARENT(parent, field); \
if (parent == RB_LEFT(gparent, field)) { \
tmp = RB_RIGHT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (RB_RIGHT(parent, field) == elm) { \
RB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_RIGHT(head, gparent, tmp, field); \
} else { \
tmp = RB_LEFT(gparent, field); \
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
RB_COLOR(tmp, field) = RB_BLACK; \
RB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (RB_LEFT(parent, field) == elm) { \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
RB_SET_BLACKRED(parent, gparent, field); \
RB_ROTATE_LEFT(head, gparent, tmp, field); \
} \
} \
RB_COLOR(head->rbh_root, field) = RB_BLACK; \
} \
\
attr void \
name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, \
struct type *elm) \
{ \
struct type *tmp; \
while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
elm != RB_ROOT(head)) { \
if (RB_LEFT(parent, field) == elm) { \
tmp = RB_RIGHT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = RB_RIGHT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
struct type *oleft; \
if ((oleft = RB_LEFT(tmp, field)) \
!= NULL) \
RB_COLOR(oleft, field) = RB_BLACK; \
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_RIGHT(head, tmp, oleft, field); \
tmp = RB_RIGHT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_RIGHT(tmp, field)) \
RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
RB_ROTATE_LEFT(head, parent, tmp, field); \
elm = RB_ROOT(head); \
break; \
} \
} else { \
tmp = RB_LEFT(parent, field); \
if (RB_COLOR(tmp, field) == RB_RED) { \
RB_SET_BLACKRED(tmp, parent, field); \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = RB_LEFT(parent, field); \
} \
if ((RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
(RB_RIGHT(tmp, field) == NULL || \
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
RB_COLOR(tmp, field) = RB_RED; \
elm = parent; \
parent = RB_PARENT(elm, field); \
} else { \
if (RB_LEFT(tmp, field) == NULL || \
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
struct type *oright; \
if ((oright = RB_RIGHT(tmp, field)) \
!= NULL) \
RB_COLOR(oright, field) = RB_BLACK; \
RB_COLOR(tmp, field) = RB_RED; \
RB_ROTATE_LEFT(head, tmp, oright, field); \
tmp = RB_LEFT(parent, field); \
} \
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
RB_COLOR(parent, field) = RB_BLACK; \
if (RB_LEFT(tmp, field)) \
RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
RB_ROTATE_RIGHT(head, parent, tmp, field); \
elm = RB_ROOT(head); \
break; \
} \
} \
} \
if (elm) \
RB_COLOR(elm, field) = RB_BLACK; \
} \
\
attr struct type * \
name##_RB_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *child, *parent, *old = elm; \
int color; \
if (RB_LEFT(elm, field) == NULL) \
child = RB_RIGHT(elm, field); \
else if (RB_RIGHT(elm, field) == NULL) \
child = RB_LEFT(elm, field); \
else { \
struct type *left; \
elm = RB_RIGHT(elm, field); \
while ((left = RB_LEFT(elm, field)) != NULL) \
elm = left; \
child = RB_RIGHT(elm, field); \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
if (RB_PARENT(elm, field) == old) \
parent = elm; \
(elm)->field = (old)->field; \
if (RB_PARENT(old, field)) { \
if (RB_LEFT(RB_PARENT(old, field), field) == old) \
RB_LEFT(RB_PARENT(old, field), field) = elm; \
else \
RB_RIGHT(RB_PARENT(old, field), field) = elm; \
RB_AUGMENT(RB_PARENT(old, field)); \
} else \
RB_ROOT(head) = elm; \
RB_PARENT(RB_LEFT(old, field), field) = elm; \
if (RB_RIGHT(old, field)) \
RB_PARENT(RB_RIGHT(old, field), field) = elm; \
if (parent) { \
left = parent; \
do { \
RB_AUGMENT(left); \
} while ((left = RB_PARENT(left, field)) != NULL); \
} \
goto color; \
} \
parent = RB_PARENT(elm, field); \
color = RB_COLOR(elm, field); \
if (child) \
RB_PARENT(child, field) = parent; \
if (parent) { \
if (RB_LEFT(parent, field) == elm) \
RB_LEFT(parent, field) = child; \
else \
RB_RIGHT(parent, field) = child; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = child; \
color: \
if (color == RB_BLACK) \
name##_RB_REMOVE_COLOR(head, parent, child); \
return (old); \
} \
\
/* Inserts a node into the RB tree */ \
attr struct type * \
name##_RB_INSERT(struct name *head, struct type *elm) \
{ \
struct type *tmp; \
struct type *parent = NULL; \
int comp = 0; \
tmp = RB_ROOT(head); \
while (tmp) { \
parent = tmp; \
comp = (cmp)(elm, parent); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
RB_SET(elm, parent, field); \
if (parent != NULL) { \
if (comp < 0) \
RB_LEFT(parent, field) = elm; \
else \
RB_RIGHT(parent, field) = elm; \
RB_AUGMENT(parent); \
} else \
RB_ROOT(head) = elm; \
name##_RB_INSERT_COLOR(head, elm); \
return (NULL); \
} \
\
/* Finds the node with the same key as elm */ \
attr struct type * \
name##_RB_FIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) \
tmp = RB_LEFT(tmp, field); \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (NULL); \
} \
\
/* Finds the first node greater than or equal to the search key */ \
attr struct type * \
name##_RB_NFIND(struct name *head, struct type *elm) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *res = NULL; \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) { \
res = tmp; \
tmp = RB_LEFT(tmp, field); \
} \
else if (comp > 0) \
tmp = RB_RIGHT(tmp, field); \
else \
return (tmp); \
} \
return (res); \
} \
\
/* ARGSUSED */ \
attr struct type * \
name##_RB_NEXT(struct type *elm) \
{ \
if (RB_RIGHT(elm, field)) { \
elm = RB_RIGHT(elm, field); \
while (RB_LEFT(elm, field)) \
elm = RB_LEFT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
/* ARGSUSED */ \
attr struct type * \
name##_RB_PREV(struct type *elm) \
{ \
if (RB_LEFT(elm, field)) { \
elm = RB_LEFT(elm, field); \
while (RB_RIGHT(elm, field)) \
elm = RB_RIGHT(elm, field); \
} else { \
if (RB_PARENT(elm, field) && \
(elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
else { \
while (RB_PARENT(elm, field) && \
(elm == RB_LEFT(RB_PARENT(elm, field), field))) \
elm = RB_PARENT(elm, field); \
elm = RB_PARENT(elm, field); \
} \
} \
return (elm); \
} \
\
attr struct type * \
name##_RB_MINMAX(struct name *head, int val) \
{ \
struct type *tmp = RB_ROOT(head); \
struct type *parent = NULL; \
while (tmp) { \
parent = tmp; \
if (val < 0) \
tmp = RB_LEFT(tmp, field); \
else \
tmp = RB_RIGHT(tmp, field); \
} \
return (parent); \
}
#define RB_NEGINF -1
#define RB_INF 1
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
#define RB_PREV(name, x, y) name##_RB_PREV(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
#define RB_FOREACH(x, name, head) \
for ((x) = RB_MIN(name, head); \
(x) != NULL; \
(x) = name##_RB_NEXT(x))
#define RB_FOREACH_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_SAFE(x, name, head, y) \
for ((x) = RB_MIN(name, head); \
((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_REVERSE(x, name, head) \
for ((x) = RB_MAX(name, head); \
(x) != NULL; \
(x) = name##_RB_PREV(x))
#define RB_FOREACH_REVERSE_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
for ((x) = RB_MAX(name, head); \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
#endif /* UV_TREE_H_ */
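
tree.h is the classic BSD macro library: you embed an RB_ENTRY (or SPLAY_ENTRY) in your node type and RB_GENERATE/SPLAY_GENERATE emit the functions. libuv itself keeps signal watchers in such a red-black tree (the hand-expanded RB_ENTRY in UV_SIGNAL_PRIVATE_FIELDS appears in uv-unix.h below). A self-contained sketch of the red-black API, assuming the header is reachable as "tree.h"; the node type and comparator are invented for illustration.

/* Sketch only: a small integer-keyed red-black tree built with the RB_*
 * macros above.  RB_GENERATE_STATIC emits the tree_RB_* functions as
 * UV__UNUSED static, so unused ones do not warn on GCC-compatible compilers. */
#include <stdio.h>
#include "tree.h"                      /* assumed include path */

struct node {
  int key;
  RB_ENTRY(node) entry;                /* left/right/parent/color links */
};

static int node_cmp(const struct node* a, const struct node* b) {
  return a->key < b->key ? -1 : a->key > b->key ? 1 : 0;
}

RB_HEAD(tree, node);                   /* declares struct tree { ... } */
RB_GENERATE_STATIC(tree, node, entry, node_cmp)

int main(void) {
  struct tree head = RB_INITIALIZER(&head);
  struct node nodes[3] = { { 30 }, { 10 }, { 20 } };
  struct node query, *it;
  int i;

  for (i = 0; i < 3; i++)
    RB_INSERT(tree, &head, &nodes[i]);

  query.key = 20;
  it = RB_FIND(tree, &head, &query);
  printf("found: %d\n", it != NULL ? it->key : -1);

  RB_FOREACH(it, tree, &head)          /* in-order walk: 10 20 30 */
    printf("%d\n", it->key);
  return 0;
}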

View File

@ -0,0 +1,32 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_AIX_H
#define UV_AIX_H
#define UV_PLATFORM_LOOP_FIELDS                                               \
  int fs_fd;                                                                  \

#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
  uv__io_t event_watcher;                                                     \
  char *dir_filename;                                                         \

#endif /* UV_AIX_H */
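
Like the other uv-<platform>.h headers in this commit, uv-aix.h declares no types of its own: it only contributes extra struct members, which uv.h splices into the public loop and handle structures by expanding these UV_PLATFORM_* macros (uv-unix.h below supplies the empty fallbacks). A sketch of the pattern with invented my_* names, since the real consumer is the uv_loop_s definition in uv.h rather than anything in this file.

/* Sketch only: how a "platform fields" macro ends up inside a struct.  The
 * my_* names are illustrative; they are not part of libuv. */
#define MY_PLATFORM_LOOP_FIELDS                                               \
  int fs_fd;                                                                  \

struct my_loop_s {
  void* data;                  /* portable members come first */
  MY_PLATFORM_LOOP_FIELDS      /* expands to the platform-specific members */
};

int main(void) {
  struct my_loop_s loop;
  loop.data = 0;
  loop.fs_fd = -1;             /* the spliced-in member is an ordinary field */
  return loop.fs_fd == -1 ? 0 : 1;
}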

View File

@ -0,0 +1,34 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_BSD_H
#define UV_BSD_H
#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
  uv__io_t event_watcher;                                                     \

#define UV_IO_PRIVATE_PLATFORM_FIELDS                                         \
  int rcount;                                                                 \
  int wcount;                                                                 \

#define UV_HAVE_KQUEUE 1
#endif /* UV_BSD_H */

View File

@ -0,0 +1,61 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_DARWIN_H
#define UV_DARWIN_H
#if defined(__APPLE__) && defined(__MACH__)
# include <mach/mach.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <TargetConditionals.h>
# define UV_PLATFORM_SEM_T semaphore_t
#endif
#define UV_IO_PRIVATE_PLATFORM_FIELDS                                         \
  int rcount;                                                                 \
  int wcount;                                                                 \

#define UV_PLATFORM_LOOP_FIELDS                                               \
  uv_thread_t cf_thread;                                                      \
  void* _cf_reserved;                                                         \
  void* cf_state;                                                             \
  uv_mutex_t cf_mutex;                                                        \
  uv_sem_t cf_sem;                                                            \
  void* cf_signals[2];                                                        \

#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
  uv__io_t event_watcher;                                                     \
  char* realpath;                                                             \
  int realpath_len;                                                           \
  int cf_flags;                                                               \
  uv_async_t* cf_cb;                                                          \
  void* cf_events[2];                                                         \
  void* cf_member[2];                                                         \
  int cf_error;                                                               \
  uv_mutex_t cf_mutex;                                                        \

#define UV_STREAM_PRIVATE_PLATFORM_FIELDS                                     \
  void* select;                                                               \

#define UV_HAVE_KQUEUE 1
#endif /* UV_DARWIN_H */

View File

@ -0,0 +1,402 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_ERRNO_H_
#define UV_ERRNO_H_
#include <errno.h>
#define UV__EOF (-4095)
#define UV__UNKNOWN (-4094)
#define UV__EAI_ADDRFAMILY (-3000)
#define UV__EAI_AGAIN (-3001)
#define UV__EAI_BADFLAGS (-3002)
#define UV__EAI_CANCELED (-3003)
#define UV__EAI_FAIL (-3004)
#define UV__EAI_FAMILY (-3005)
#define UV__EAI_MEMORY (-3006)
#define UV__EAI_NODATA (-3007)
#define UV__EAI_NONAME (-3008)
#define UV__EAI_OVERFLOW (-3009)
#define UV__EAI_SERVICE (-3010)
#define UV__EAI_SOCKTYPE (-3011)
#define UV__EAI_BADHINTS (-3013)
#define UV__EAI_PROTOCOL (-3014)
/* Only map to the system errno on non-Windows platforms. It's apparently
* a fairly common practice for Windows programmers to redefine errno codes.
*/
#if defined(E2BIG) && !defined(_WIN32)
# define UV__E2BIG (-E2BIG)
#else
# define UV__E2BIG (-4093)
#endif
#if defined(EACCES) && !defined(_WIN32)
# define UV__EACCES (-EACCES)
#else
# define UV__EACCES (-4092)
#endif
#if defined(EADDRINUSE) && !defined(_WIN32)
# define UV__EADDRINUSE (-EADDRINUSE)
#else
# define UV__EADDRINUSE (-4091)
#endif
#if defined(EADDRNOTAVAIL) && !defined(_WIN32)
# define UV__EADDRNOTAVAIL (-EADDRNOTAVAIL)
#else
# define UV__EADDRNOTAVAIL (-4090)
#endif
#if defined(EAFNOSUPPORT) && !defined(_WIN32)
# define UV__EAFNOSUPPORT (-EAFNOSUPPORT)
#else
# define UV__EAFNOSUPPORT (-4089)
#endif
#if defined(EAGAIN) && !defined(_WIN32)
# define UV__EAGAIN (-EAGAIN)
#else
# define UV__EAGAIN (-4088)
#endif
#if defined(EALREADY) && !defined(_WIN32)
# define UV__EALREADY (-EALREADY)
#else
# define UV__EALREADY (-4084)
#endif
#if defined(EBADF) && !defined(_WIN32)
# define UV__EBADF (-EBADF)
#else
# define UV__EBADF (-4083)
#endif
#if defined(EBUSY) && !defined(_WIN32)
# define UV__EBUSY (-EBUSY)
#else
# define UV__EBUSY (-4082)
#endif
#if defined(ECANCELED) && !defined(_WIN32)
# define UV__ECANCELED (-ECANCELED)
#else
# define UV__ECANCELED (-4081)
#endif
#if defined(ECHARSET) && !defined(_WIN32)
# define UV__ECHARSET (-ECHARSET)
#else
# define UV__ECHARSET (-4080)
#endif
#if defined(ECONNABORTED) && !defined(_WIN32)
# define UV__ECONNABORTED (-ECONNABORTED)
#else
# define UV__ECONNABORTED (-4079)
#endif
#if defined(ECONNREFUSED) && !defined(_WIN32)
# define UV__ECONNREFUSED (-ECONNREFUSED)
#else
# define UV__ECONNREFUSED (-4078)
#endif
#if defined(ECONNRESET) && !defined(_WIN32)
# define UV__ECONNRESET (-ECONNRESET)
#else
# define UV__ECONNRESET (-4077)
#endif
#if defined(EDESTADDRREQ) && !defined(_WIN32)
# define UV__EDESTADDRREQ (-EDESTADDRREQ)
#else
# define UV__EDESTADDRREQ (-4076)
#endif
#if defined(EEXIST) && !defined(_WIN32)
# define UV__EEXIST (-EEXIST)
#else
# define UV__EEXIST (-4075)
#endif
#if defined(EFAULT) && !defined(_WIN32)
# define UV__EFAULT (-EFAULT)
#else
# define UV__EFAULT (-4074)
#endif
#if defined(EHOSTUNREACH) && !defined(_WIN32)
# define UV__EHOSTUNREACH (-EHOSTUNREACH)
#else
# define UV__EHOSTUNREACH (-4073)
#endif
#if defined(EINTR) && !defined(_WIN32)
# define UV__EINTR (-EINTR)
#else
# define UV__EINTR (-4072)
#endif
#if defined(EINVAL) && !defined(_WIN32)
# define UV__EINVAL (-EINVAL)
#else
# define UV__EINVAL (-4071)
#endif
#if defined(EIO) && !defined(_WIN32)
# define UV__EIO (-EIO)
#else
# define UV__EIO (-4070)
#endif
#if defined(EISCONN) && !defined(_WIN32)
# define UV__EISCONN (-EISCONN)
#else
# define UV__EISCONN (-4069)
#endif
#if defined(EISDIR) && !defined(_WIN32)
# define UV__EISDIR (-EISDIR)
#else
# define UV__EISDIR (-4068)
#endif
#if defined(ELOOP) && !defined(_WIN32)
# define UV__ELOOP (-ELOOP)
#else
# define UV__ELOOP (-4067)
#endif
#if defined(EMFILE) && !defined(_WIN32)
# define UV__EMFILE (-EMFILE)
#else
# define UV__EMFILE (-4066)
#endif
#if defined(EMSGSIZE) && !defined(_WIN32)
# define UV__EMSGSIZE (-EMSGSIZE)
#else
# define UV__EMSGSIZE (-4065)
#endif
#if defined(ENAMETOOLONG) && !defined(_WIN32)
# define UV__ENAMETOOLONG (-ENAMETOOLONG)
#else
# define UV__ENAMETOOLONG (-4064)
#endif
#if defined(ENETDOWN) && !defined(_WIN32)
# define UV__ENETDOWN (-ENETDOWN)
#else
# define UV__ENETDOWN (-4063)
#endif
#if defined(ENETUNREACH) && !defined(_WIN32)
# define UV__ENETUNREACH (-ENETUNREACH)
#else
# define UV__ENETUNREACH (-4062)
#endif
#if defined(ENFILE) && !defined(_WIN32)
# define UV__ENFILE (-ENFILE)
#else
# define UV__ENFILE (-4061)
#endif
#if defined(ENOBUFS) && !defined(_WIN32)
# define UV__ENOBUFS (-ENOBUFS)
#else
# define UV__ENOBUFS (-4060)
#endif
#if defined(ENODEV) && !defined(_WIN32)
# define UV__ENODEV (-ENODEV)
#else
# define UV__ENODEV (-4059)
#endif
#if defined(ENOENT) && !defined(_WIN32)
# define UV__ENOENT (-ENOENT)
#else
# define UV__ENOENT (-4058)
#endif
#if defined(ENOMEM) && !defined(_WIN32)
# define UV__ENOMEM (-ENOMEM)
#else
# define UV__ENOMEM (-4057)
#endif
#if defined(ENONET) && !defined(_WIN32)
# define UV__ENONET (-ENONET)
#else
# define UV__ENONET (-4056)
#endif
#if defined(ENOSPC) && !defined(_WIN32)
# define UV__ENOSPC (-ENOSPC)
#else
# define UV__ENOSPC (-4055)
#endif
#if defined(ENOSYS) && !defined(_WIN32)
# define UV__ENOSYS (-ENOSYS)
#else
# define UV__ENOSYS (-4054)
#endif
#if defined(ENOTCONN) && !defined(_WIN32)
# define UV__ENOTCONN (-ENOTCONN)
#else
# define UV__ENOTCONN (-4053)
#endif
#if defined(ENOTDIR) && !defined(_WIN32)
# define UV__ENOTDIR (-ENOTDIR)
#else
# define UV__ENOTDIR (-4052)
#endif
#if defined(ENOTEMPTY) && !defined(_WIN32)
# define UV__ENOTEMPTY (-ENOTEMPTY)
#else
# define UV__ENOTEMPTY (-4051)
#endif
#if defined(ENOTSOCK) && !defined(_WIN32)
# define UV__ENOTSOCK (-ENOTSOCK)
#else
# define UV__ENOTSOCK (-4050)
#endif
#if defined(ENOTSUP) && !defined(_WIN32)
# define UV__ENOTSUP (-ENOTSUP)
#else
# define UV__ENOTSUP (-4049)
#endif
#if defined(EPERM) && !defined(_WIN32)
# define UV__EPERM (-EPERM)
#else
# define UV__EPERM (-4048)
#endif
#if defined(EPIPE) && !defined(_WIN32)
# define UV__EPIPE (-EPIPE)
#else
# define UV__EPIPE (-4047)
#endif
#if defined(EPROTO) && !defined(_WIN32)
# define UV__EPROTO (-EPROTO)
#else
# define UV__EPROTO (-4046)
#endif
#if defined(EPROTONOSUPPORT) && !defined(_WIN32)
# define UV__EPROTONOSUPPORT (-EPROTONOSUPPORT)
#else
# define UV__EPROTONOSUPPORT (-4045)
#endif
#if defined(EPROTOTYPE) && !defined(_WIN32)
# define UV__EPROTOTYPE (-EPROTOTYPE)
#else
# define UV__EPROTOTYPE (-4044)
#endif
#if defined(EROFS) && !defined(_WIN32)
# define UV__EROFS (-EROFS)
#else
# define UV__EROFS (-4043)
#endif
#if defined(ESHUTDOWN) && !defined(_WIN32)
# define UV__ESHUTDOWN (-ESHUTDOWN)
#else
# define UV__ESHUTDOWN (-4042)
#endif
#if defined(ESPIPE) && !defined(_WIN32)
# define UV__ESPIPE (-ESPIPE)
#else
# define UV__ESPIPE (-4041)
#endif
#if defined(ESRCH) && !defined(_WIN32)
# define UV__ESRCH (-ESRCH)
#else
# define UV__ESRCH (-4040)
#endif
#if defined(ETIMEDOUT) && !defined(_WIN32)
# define UV__ETIMEDOUT (-ETIMEDOUT)
#else
# define UV__ETIMEDOUT (-4039)
#endif
#if defined(ETXTBSY) && !defined(_WIN32)
# define UV__ETXTBSY (-ETXTBSY)
#else
# define UV__ETXTBSY (-4038)
#endif
#if defined(EXDEV) && !defined(_WIN32)
# define UV__EXDEV (-EXDEV)
#else
# define UV__EXDEV (-4037)
#endif
#if defined(EFBIG) && !defined(_WIN32)
# define UV__EFBIG (-EFBIG)
#else
# define UV__EFBIG (-4036)
#endif
#if defined(ENOPROTOOPT) && !defined(_WIN32)
# define UV__ENOPROTOOPT (-ENOPROTOOPT)
#else
# define UV__ENOPROTOOPT (-4035)
#endif
#if defined(ERANGE) && !defined(_WIN32)
# define UV__ERANGE (-ERANGE)
#else
# define UV__ERANGE (-4034)
#endif
#if defined(ENXIO) && !defined(_WIN32)
# define UV__ENXIO (-ENXIO)
#else
# define UV__ENXIO (-4033)
#endif
#if defined(EMLINK) && !defined(_WIN32)
# define UV__EMLINK (-EMLINK)
#else
# define UV__EMLINK (-4032)
#endif
#endif /* UV_ERRNO_H_ */
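
uv-errno.h fixes libuv's error convention: every UV__E* constant is negative, equal to the negated system errno where one exists and to a reserved value in the -4095..-3000 range otherwise (always so on Windows, where programs commonly redefine errno codes). A sketch of what that convention means for a caller on Unix, using a made-up do_open() stand-in; the public uv_strerror()-style helpers live in uv.h, not here.

/* Sketch only: a libuv-style call returns 0/positive on success and a
 * negative code on failure; on Unix that code is simply -errno, so it can
 * be mapped back for reporting.  do_open() is invented for illustration. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int do_open(const char* path) {
  int fd = open(path, O_RDONLY);
  return fd >= 0 ? fd : -errno;        /* mirrors UV__ENOENT == -ENOENT etc. */
}

int main(void) {
  int r = do_open("/definitely/not/there");
  if (r < 0) {
    fprintf(stderr, "open failed: %s\n", strerror(-r));   /* -r == ENOENT */
    return 1;
  }
  close(r);
  return 0;
}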

View File

@ -0,0 +1,34 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_LINUX_H
#define UV_LINUX_H
#define UV_PLATFORM_LOOP_FIELDS                                               \
  uv__io_t inotify_read_watcher;                                              \
  void* inotify_watchers;                                                     \
  int inotify_fd;                                                             \

#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
  void* watchers[2];                                                          \
  int wd;                                                                     \

#endif /* UV_LINUX_H */

View File

@ -0,0 +1,44 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_SUNOS_H
#define UV_SUNOS_H
#include <sys/port.h>
#include <port.h>
/* For the sake of convenience and reduced #ifdef-ery in src/unix/sunos.c,
* add the fs_event fields even when this version of SunOS doesn't support
* file watching.
*/
#define UV_PLATFORM_LOOP_FIELDS                                               \
  uv__io_t fs_event_watcher;                                                  \
  int fs_fd;                                                                  \

#if defined(PORT_SOURCE_FILE)
# define UV_PLATFORM_FS_EVENT_FIELDS                                          \
  file_obj_t fo;                                                              \
  int fd;                                                                     \

#endif /* defined(PORT_SOURCE_FILE) */
#endif /* UV_SUNOS_H */

View File

@ -0,0 +1,37 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* This file is private to libuv. It provides common functionality to both
* Windows and Unix backends.
*/
#ifndef UV_THREADPOOL_H_
#define UV_THREADPOOL_H_
struct uv__work {
void (*work)(struct uv__work *w);
void (*done)(struct uv__work *w, int status);
struct uv_loop_s* loop;
void* wq[2];
};
#endif /* UV_THREADPOOL_H_ */
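
struct uv__work is the unit of work shared by the Unix and Windows thread pools: work() runs on a pool thread, done() runs afterwards on the loop thread with a status code, and wq links the item into the pool's queue. The submission function itself lives in src/threadpool.c and is not declared here, so the sketch below only shows callbacks shaped for this struct, invoked directly; the fib request type is invented for illustration.

/* Sketch only: a request type embedding struct uv__work, with callbacks of
 * the declared signatures.  The real scheduler (uv__work_submit) is internal
 * and not declared in this header, so the callbacks are just called inline. */
#include <stdio.h>
#include "uv-threadpool.h"             /* assumed include name */

struct fib_req {
  struct uv__work work_req;            /* first member, so casts are cheap */
  int n;
  long result;
};

static void fib_work(struct uv__work* w) {
  /* Would run on a pool thread. */
  struct fib_req* req = (struct fib_req*) w;
  long a = 0, b = 1, t;
  int i;
  for (i = 0; i < req->n; i++) { t = a + b; a = b; b = t; }
  req->result = a;
}

static void fib_done(struct uv__work* w, int status) {
  /* Would run back on the loop thread; status is 0 unless cancelled. */
  struct fib_req* req = (struct fib_req*) w;
  printf("fib(%d) = %ld (status %d)\n", req->n, req->result, status);
}

int main(void) {
  struct fib_req req;
  req.n = 10;
  req.work_req.work = fib_work;
  req.work_req.done = fib_done;
  req.work_req.work(&req.work_req);    /* stand-in for the pool thread */
  req.work_req.done(&req.work_req, 0); /* stand-in for the loop callback */
  return 0;
}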

View File

@ -0,0 +1,345 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_UNIX_H
#define UV_UNIX_H
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <termios.h>
#include <pwd.h>
#include <semaphore.h>
#include <pthread.h>
#ifdef __ANDROID__
#include "pthread-fixes.h"
#endif
#include <signal.h>
#include "uv-threadpool.h"
#if defined(__linux__)
# include "uv-linux.h"
#elif defined(_AIX)
# include "uv-aix.h"
#elif defined(__sun)
# include "uv-sunos.h"
#elif defined(__APPLE__)
# include "uv-darwin.h"
#elif defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# include "uv-bsd.h"
#endif
#ifndef NI_MAXHOST
# define NI_MAXHOST 1025
#endif
#ifndef NI_MAXSERV
# define NI_MAXSERV 32
#endif
#ifndef UV_IO_PRIVATE_PLATFORM_FIELDS
# define UV_IO_PRIVATE_PLATFORM_FIELDS /* empty */
#endif
struct uv__io_s;
struct uv__async;
struct uv_loop_s;
typedef void (*uv__io_cb)(struct uv_loop_s* loop,
struct uv__io_s* w,
unsigned int events);
typedef struct uv__io_s uv__io_t;
struct uv__io_s {
uv__io_cb cb;
void* pending_queue[2];
void* watcher_queue[2];
unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
unsigned int events; /* Current event mask. */
int fd;
UV_IO_PRIVATE_PLATFORM_FIELDS
};
typedef void (*uv__async_cb)(struct uv_loop_s* loop,
struct uv__async* w,
unsigned int nevents);
struct uv__async {
uv__async_cb cb;
uv__io_t io_watcher;
int wfd;
};
#ifndef UV_PLATFORM_SEM_T
# define UV_PLATFORM_SEM_T sem_t
#endif
#ifndef UV_PLATFORM_LOOP_FIELDS
# define UV_PLATFORM_LOOP_FIELDS /* empty */
#endif
#ifndef UV_PLATFORM_FS_EVENT_FIELDS
# define UV_PLATFORM_FS_EVENT_FIELDS /* empty */
#endif
#ifndef UV_STREAM_PRIVATE_PLATFORM_FIELDS
# define UV_STREAM_PRIVATE_PLATFORM_FIELDS /* empty */
#endif
/* Note: May be cast to struct iovec. See writev(2). */
typedef struct uv_buf_t {
char* base;
size_t len;
} uv_buf_t;
typedef int uv_file;
typedef int uv_os_sock_t;
#define UV_ONCE_INIT PTHREAD_ONCE_INIT
typedef pthread_once_t uv_once_t;
typedef pthread_t uv_thread_t;
typedef pthread_mutex_t uv_mutex_t;
typedef pthread_rwlock_t uv_rwlock_t;
typedef UV_PLATFORM_SEM_T uv_sem_t;
typedef pthread_cond_t uv_cond_t;
typedef pthread_key_t uv_key_t;
#if defined(__APPLE__) && defined(__MACH__)
typedef struct {
unsigned int n;
unsigned int count;
uv_mutex_t mutex;
uv_sem_t turnstile1;
uv_sem_t turnstile2;
} uv_barrier_t;
#else /* defined(__APPLE__) && defined(__MACH__) */
typedef pthread_barrier_t uv_barrier_t;
#endif /* defined(__APPLE__) && defined(__MACH__) */
/* Platform-specific definitions for uv_spawn support. */
typedef gid_t uv_gid_t;
typedef uid_t uv_uid_t;
typedef struct dirent uv__dirent_t;
#define UV__DT_DIR DT_DIR
/* Platform-specific definitions for uv_dlopen support. */
#define UV_DYNAMIC /* empty */
typedef struct {
void* handle;
char* errmsg;
} uv_lib_t;
#define UV_LOOP_PRIVATE_FIELDS \
unsigned long flags; \
int backend_fd; \
void* pending_queue[2]; \
void* watcher_queue[2]; \
uv__io_t** watchers; \
unsigned int nwatchers; \
unsigned int nfds; \
void* wq[2]; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async; \
uv_rwlock_t cloexec_lock; \
uv_handle_t* closing_handles; \
void* process_handles[1][2]; \
void* prepare_handles[2]; \
void* check_handles[2]; \
void* idle_handles[2]; \
void* async_handles[2]; \
struct uv__async async_watcher; \
struct { \
void* min; \
unsigned int nelts; \
} timer_heap; \
uint64_t timer_counter; \
uint64_t time; \
int signal_pipefd[2]; \
uv__io_t signal_io_watcher; \
uv_signal_t child_watcher; \
int emfile_fd; \
  UV_PLATFORM_LOOP_FIELDS                                                     \

#define UV_REQ_TYPE_PRIVATE /* empty */
#define UV_REQ_PRIVATE_FIELDS /* empty */
#define UV_PRIVATE_REQ_TYPES /* empty */
#define UV_WRITE_PRIVATE_FIELDS \
void* queue[2]; \
unsigned int write_index; \
uv_buf_t* bufs; \
unsigned int nbufs; \
int error; \
  uv_buf_t bufsml[4];                                                         \

#define UV_CONNECT_PRIVATE_FIELDS                                             \
  void* queue[2];                                                             \

#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */
#define UV_UDP_SEND_PRIVATE_FIELDS \
void* queue[2]; \
struct sockaddr_storage addr; \
unsigned int nbufs; \
uv_buf_t* bufs; \
ssize_t status; \
uv_udp_send_cb send_cb; \
  uv_buf_t bufsml[4];                                                         \

#define UV_HANDLE_PRIVATE_FIELDS                                              \
  uv_handle_t* next_closing;                                                  \
  unsigned int flags;                                                         \

#define UV_STREAM_PRIVATE_FIELDS                                              \
uv_connect_t *connect_req; \
uv_shutdown_t *shutdown_req; \
uv__io_t io_watcher; \
void* write_queue[2]; \
void* write_completed_queue[2]; \
uv_connection_cb connection_cb; \
int delayed_error; \
int accepted_fd; \
void* queued_fds; \
  UV_STREAM_PRIVATE_PLATFORM_FIELDS                                           \

#define UV_TCP_PRIVATE_FIELDS /* empty */
#define UV_UDP_PRIVATE_FIELDS \
uv_alloc_cb alloc_cb; \
uv_udp_recv_cb recv_cb; \
uv__io_t io_watcher; \
void* write_queue[2]; \
  void* write_completed_queue[2];                                             \

#define UV_PIPE_PRIVATE_FIELDS                                                \
const char* pipe_fname; /* strdup'ed */
#define UV_POLL_PRIVATE_FIELDS \
uv__io_t io_watcher;
#define UV_PREPARE_PRIVATE_FIELDS \
uv_prepare_cb prepare_cb; \
  void* queue[2];                                                             \

#define UV_CHECK_PRIVATE_FIELDS                                               \
  uv_check_cb check_cb;                                                       \
  void* queue[2];                                                             \

#define UV_IDLE_PRIVATE_FIELDS                                                \
  uv_idle_cb idle_cb;                                                         \
  void* queue[2];                                                             \

#define UV_ASYNC_PRIVATE_FIELDS                                               \
  uv_async_cb async_cb;                                                       \
  void* queue[2];                                                             \
  int pending;                                                                \

#define UV_TIMER_PRIVATE_FIELDS                                               \
uv_timer_cb timer_cb; \
void* heap_node[3]; \
uint64_t timeout; \
uint64_t repeat; \
uint64_t start_id;
#define UV_GETADDRINFO_PRIVATE_FIELDS \
struct uv__work work_req; \
uv_getaddrinfo_cb cb; \
struct addrinfo* hints; \
char* hostname; \
char* service; \
struct addrinfo* res; \
int retcode;
#define UV_GETNAMEINFO_PRIVATE_FIELDS \
struct uv__work work_req; \
uv_getnameinfo_cb getnameinfo_cb; \
struct sockaddr_storage storage; \
int flags; \
char host[NI_MAXHOST]; \
char service[NI_MAXSERV]; \
int retcode;
#define UV_PROCESS_PRIVATE_FIELDS \
void* queue[2]; \
  int status;                                                                 \

#define UV_FS_PRIVATE_FIELDS                                                  \
const char *new_path; \
uv_file file; \
int flags; \
mode_t mode; \
unsigned int nbufs; \
uv_buf_t* bufs; \
off_t off; \
uv_uid_t uid; \
uv_gid_t gid; \
double atime; \
double mtime; \
struct uv__work work_req; \
  uv_buf_t bufsml[4];                                                         \

#define UV_WORK_PRIVATE_FIELDS                                                \
struct uv__work work_req;
#define UV_TTY_PRIVATE_FIELDS \
struct termios orig_termios; \
int mode;
#define UV_SIGNAL_PRIVATE_FIELDS \
/* RB_ENTRY(uv_signal_s) tree_entry; */ \
struct { \
struct uv_signal_s* rbe_left; \
struct uv_signal_s* rbe_right; \
struct uv_signal_s* rbe_parent; \
int rbe_color; \
} tree_entry; \
  /* Use two counters here so we don't have to fiddle with atomics. */        \
unsigned int caught_signals; \
unsigned int dispatched_signals;
#define UV_FS_EVENT_PRIVATE_FIELDS \
uv_fs_event_cb cb; \
  UV_PLATFORM_FS_EVENT_FIELDS                                                 \

#endif /* UV_UNIX_H */
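
The note on uv_buf_t above says it may be cast to struct iovec: both are a pointer plus a length, so an array of buffers can be handed straight to writev(2). A sketch under that assumption; the typedef is repeated locally only to keep the snippet self-contained, and the strings are arbitrary.

/* Sketch only: relies on the layout compatibility the header's comment
 * promises (char* base / size_t len vs. void* iov_base / size_t iov_len). */
#include <stddef.h>
#include <sys/uio.h>
#include <unistd.h>

typedef struct uv_buf_t {              /* mirrors the typedef in uv-unix.h */
  char* base;
  size_t len;
} uv_buf_t;

int main(void) {
  uv_buf_t bufs[2];

  bufs[0].base = (char*) "hello, ";
  bufs[0].len = 7;
  bufs[1].base = (char*) "world\n";
  bufs[1].len = 6;

  /* Same layout as struct iovec, so a cast is all writev(2) needs. */
  if (writev(STDOUT_FILENO, (struct iovec*) bufs, 2) < 0)
    return 1;
  return 0;
}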

View File

@ -0,0 +1,38 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_VERSION_H
#define UV_VERSION_H
/*
* Versions with an even minor version (e.g. 0.6.1 or 1.0.4) are API and ABI
* stable. When the minor version is odd, the API can change between patch
* releases. Make sure you update the -soname directives in configure.ac
* and uv.gyp whenever you bump UV_VERSION_MAJOR or UV_VERSION_MINOR (but
* not UV_VERSION_PATCH.)
*/
#define UV_VERSION_MAJOR 0
#define UV_VERSION_MINOR 11
#define UV_VERSION_PATCH 29
#define UV_VERSION_IS_RELEASE 0
#endif /* UV_VERSION_H */
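
The version macros above are plain integers, so callers can gate code at compile time. A small sketch follows; the particular 0.11 cut-off is only an example (this is an odd, i.e. unstable, minor release per the comment above).

/* Sketch only: a compile-time feature gate built from the version macros. */
#include <stdio.h>
#include "uv-version.h"                /* assumed include name */

#if UV_VERSION_MAJOR == 0 && UV_VERSION_MINOR >= 11
# define HAVE_UNSTABLE_0_11_API 1
#else
# define HAVE_UNSTABLE_0_11_API 0
#endif

int main(void) {
  printf("libuv %d.%d.%d%s, 0.11 API available: %s\n",
         UV_VERSION_MAJOR, UV_VERSION_MINOR, UV_VERSION_PATCH,
         UV_VERSION_IS_RELEASE ? "" : "-pre",
         HAVE_UNSTABLE_0_11_API ? "yes" : "no");
  return 0;
}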

View File

@ -0,0 +1,639 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0502
#endif
#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
typedef intptr_t ssize_t;
# define _SSIZE_T_
# define _SSIZE_T_DEFINED
#endif
#include <winsock2.h>
#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)
typedef struct pollfd {
SOCKET fd;
short events;
short revents;
} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD;
#endif
#ifndef LOCALE_INVARIANT
# define LOCALE_INVARIANT 0x007f
#endif
#ifndef _malloca
# if defined(_DEBUG)
# define _malloca(size) malloc(size)
# define _freea(ptr) free(ptr)
# else
# define _malloca(size) alloca(size)
# define _freea(ptr)
# endif
#endif
#include <mswsock.h>
#include <ws2tcpip.h>
#include <windows.h>
#include <process.h>
#include <signal.h>
#include <sys/stat.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "tree.h"
#include "uv-threadpool.h"
#define MAX_PIPENAME_LEN 256
#ifndef S_IFLNK
# define S_IFLNK 0xA000
#endif
/* Additional signals supported by uv_signal and/or uv_kill. The CRT defines
* the following signals already:
*
* #define SIGINT 2
* #define SIGILL 4
* #define SIGABRT_COMPAT 6
* #define SIGFPE 8
* #define SIGSEGV 11
* #define SIGTERM 15
* #define SIGBREAK 21
* #define SIGABRT 22
*
* The additional signals have values that are common on other Unix
* variants (Linux and Darwin).
*/
#define SIGHUP 1
#define SIGKILL 9
#define SIGWINCH 28
/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many */
/* unix-like platforms. However MinGW doesn't define it, so we do. */
#ifndef SIGABRT_COMPAT
# define SIGABRT_COMPAT 6
#endif
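/* Editorial sketch, not part of this header: the extra signal numbers defined
 * above are consumed through the ordinary public uv_signal API, e.g. to react
 * to terminal resizes via SIGWINCH. The function and variable names below are
 * hypothetical. */
#if 0
#include "uv.h"
static void on_winch(uv_signal_t* handle, int signum) {
  /* signum == SIGWINCH here; re-query the console dimensions. */
}
static void watch_resize(uv_loop_t* loop) {
  static uv_signal_t sig;
  uv_signal_init(loop, &sig);
  uv_signal_start(&sig, on_winch, SIGWINCH);
}
#endif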
/*
* Guids and typedefs for winsock extension functions
* Mingw32 doesn't have these :-(
*/
#ifndef WSAID_ACCEPTEX
# define WSAID_ACCEPTEX \
{0xb5367df1, 0xcbac, 0x11cf, \
{0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
# define WSAID_CONNECTEX \
{0x25a207b9, 0xddf3, 0x4660, \
{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}
# define WSAID_GETACCEPTEXSOCKADDRS \
{0xb5367df2, 0xcbac, 0x11cf, \
{0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
# define WSAID_DISCONNECTEX \
{0x7fda2e11, 0x8630, 0x436f, \
{0xa0, 0x31, 0xf5, 0x36, 0xa6, 0xee, 0xc1, 0x57}}
# define WSAID_TRANSMITFILE \
{0xb5367df0, 0xcbac, 0x11cf, \
{0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
typedef BOOL PASCAL (*LPFN_ACCEPTEX)
(SOCKET sListenSocket,
SOCKET sAcceptSocket,
PVOID lpOutputBuffer,
DWORD dwReceiveDataLength,
DWORD dwLocalAddressLength,
DWORD dwRemoteAddressLength,
LPDWORD lpdwBytesReceived,
LPOVERLAPPED lpOverlapped);
typedef BOOL PASCAL (*LPFN_CONNECTEX)
(SOCKET s,
const struct sockaddr* name,
int namelen,
PVOID lpSendBuffer,
DWORD dwSendDataLength,
LPDWORD lpdwBytesSent,
LPOVERLAPPED lpOverlapped);
typedef void PASCAL (*LPFN_GETACCEPTEXSOCKADDRS)
(PVOID lpOutputBuffer,
DWORD dwReceiveDataLength,
DWORD dwLocalAddressLength,
DWORD dwRemoteAddressLength,
LPSOCKADDR* LocalSockaddr,
LPINT LocalSockaddrLength,
LPSOCKADDR* RemoteSockaddr,
LPINT RemoteSockaddrLength);
typedef BOOL PASCAL (*LPFN_DISCONNECTEX)
(SOCKET hSocket,
LPOVERLAPPED lpOverlapped,
DWORD dwFlags,
DWORD reserved);
typedef BOOL PASCAL (*LPFN_TRANSMITFILE)
(SOCKET hSocket,
HANDLE hFile,
DWORD nNumberOfBytesToWrite,
DWORD nNumberOfBytesPerSend,
LPOVERLAPPED lpOverlapped,
LPTRANSMIT_FILE_BUFFERS lpTransmitBuffers,
DWORD dwFlags);
typedef PVOID RTL_SRWLOCK;
typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
#endif
typedef int (WSAAPI* LPFN_WSARECV)
(SOCKET socket,
LPWSABUF buffers,
DWORD buffer_count,
LPDWORD bytes,
LPDWORD flags,
LPWSAOVERLAPPED overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
typedef int (WSAAPI* LPFN_WSARECVFROM)
(SOCKET socket,
LPWSABUF buffers,
DWORD buffer_count,
LPDWORD bytes,
LPDWORD flags,
struct sockaddr* addr,
LPINT addr_len,
LPWSAOVERLAPPED overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
#ifndef _NTDEF_
typedef LONG NTSTATUS;
typedef NTSTATUS *PNTSTATUS;
#endif
#ifndef RTL_CONDITION_VARIABLE_INIT
typedef PVOID CONDITION_VARIABLE, *PCONDITION_VARIABLE;
#endif
typedef struct _AFD_POLL_HANDLE_INFO {
HANDLE Handle;
ULONG Events;
NTSTATUS Status;
} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;
typedef struct _AFD_POLL_INFO {
LARGE_INTEGER Timeout;
ULONG NumberOfHandles;
ULONG Exclusive;
AFD_POLL_HANDLE_INFO Handles[1];
} AFD_POLL_INFO, *PAFD_POLL_INFO;
#define UV_MSAFD_PROVIDER_COUNT 3
/**
* It should be possible to cast uv_buf_t[] to WSABUF[]
* see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx
*/
typedef struct uv_buf_t {
ULONG len;
char* base;
} uv_buf_t;
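/* Editorial sketch, not part of this header: WSABUF is laid out as
 * { ULONG len; CHAR* buf; }, so the cast mentioned above lets a uv_buf_t
 * array be handed to Winsock scatter/gather calls without copying. The
 * locals below (sock, hdr, body, ...) are hypothetical. */
#if 0
uv_buf_t bufs[2];
DWORD bytes_sent;
bufs[0] = uv_buf_init(hdr, hdr_len);
bufs[1] = uv_buf_init(body, body_len);
WSASend(sock, (WSABUF*) bufs, 2, &bytes_sent, 0, NULL, NULL);
#endif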
typedef int uv_file;
typedef SOCKET uv_os_sock_t;
typedef HANDLE uv_thread_t;
typedef HANDLE uv_sem_t;
typedef CRITICAL_SECTION uv_mutex_t;
/* This condition variable implementation is based on the SetEvent solution
* (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
* We could not use the SignalObjectAndWait solution (section 3.4) because
* it wants the 2nd argument (type uv_mutex_t) of uv_cond_wait() and
* uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs.
*/
typedef union {
CONDITION_VARIABLE cond_var;
struct {
unsigned int waiters_count;
CRITICAL_SECTION waiters_count_lock;
HANDLE signal_event;
HANDLE broadcast_event;
} fallback;
} uv_cond_t;
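/* Editorial sketch, not part of this header: the public pattern these types
 * support, and why uv_cond_wait() takes a uv_mutex_t (a CRITICAL_SECTION
 * here) rather than a HANDLE, as the comment above explains. 'mutex', 'cond'
 * and 'ready' are hypothetical. */
#if 0
uv_mutex_lock(&mutex);
while (!ready)
  uv_cond_wait(&cond, &mutex);
uv_mutex_unlock(&mutex);
#endif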
typedef union {
/* srwlock_ has type SRWLOCK, but not all toolchains define this type in */
/* windows.h. */
SRWLOCK srwlock_;
struct {
uv_mutex_t read_mutex_;
uv_mutex_t write_mutex_;
unsigned int num_readers_;
} fallback_;
} uv_rwlock_t;
typedef struct {
unsigned int n;
unsigned int count;
uv_mutex_t mutex;
uv_sem_t turnstile1;
uv_sem_t turnstile2;
} uv_barrier_t;
typedef struct {
DWORD tls_index;
} uv_key_t;
#define UV_ONCE_INIT { 0, NULL }
typedef struct uv_once_s {
unsigned char ran;
HANDLE event;
} uv_once_t;
/* Platform-specific definitions for uv_spawn support. */
typedef unsigned char uv_uid_t;
typedef unsigned char uv_gid_t;
typedef struct uv__dirent_s {
int d_type;
char d_name[1];
} uv__dirent_t;
#define UV__DT_DIR UV_DIRENT_DIR
#define UV__DT_FILE UV_DIRENT_FILE
/* Platform-specific definitions for uv_dlopen support. */
#define UV_DYNAMIC FAR WINAPI
typedef struct {
HMODULE handle;
char* errmsg;
} uv_lib_t;
RB_HEAD(uv_timer_tree_s, uv_timer_s);
#define UV_LOOP_PRIVATE_FIELDS \
/* The loop's I/O completion port */ \
HANDLE iocp; \
/* The current time according to the event loop, in msecs. */ \
uint64_t time; \
/* GetTickCount() result when the event loop time was last updated. */ \
DWORD last_tick_count; \
/* Tail of a single-linked circular queue of pending reqs. If the queue */ \
/* is empty, tail_ is NULL. If there is only one item, */ \
/* tail_->next_req == tail_ */ \
uv_req_t* pending_reqs_tail; \
/* Head of a single-linked list of closed handles */ \
uv_handle_t* endgame_handles; \
/* The head of the timers tree */ \
struct uv_timer_tree_s timers; \
/* Lists of active loop (prepare / check / idle) watchers */ \
uv_prepare_t* prepare_handles; \
uv_check_t* check_handles; \
uv_idle_t* idle_handles; \
/* This pointer will refer to the prepare/check/idle handle whose */ \
/* callback is scheduled to be called next. This is needed to allow */ \
/* safe removal from one of the lists above while that list is being */ \
/* iterated over. */ \
uv_prepare_t* next_prepare_handle; \
uv_check_t* next_check_handle; \
uv_idle_t* next_idle_handle; \
/* This handle holds the peer sockets for the fast variant of uv_poll_t */ \
SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \
/* Counter to keep track of active tcp streams */ \
unsigned int active_tcp_streams; \
/* Counter to keep track of active udp streams */ \
unsigned int active_udp_streams; \
/* Counter of started timers. */ \
uint64_t timer_counter; \
/* Threadpool */ \
void* wq[2]; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async;
#define UV_REQ_TYPE_PRIVATE \
/* TODO: remove the req suffix */ \
UV_ACCEPT, \
UV_FS_EVENT_REQ, \
UV_POLL_REQ, \
UV_PROCESS_EXIT, \
UV_READ, \
UV_UDP_RECV, \
UV_WAKEUP, \
UV_SIGNAL_REQ,
#define UV_REQ_PRIVATE_FIELDS \
union { \
/* Used by I/O operations */ \
struct { \
OVERLAPPED overlapped; \
size_t queued_bytes; \
}; \
}; \
struct uv_req_s* next_req;
#define UV_WRITE_PRIVATE_FIELDS \
int ipc_header; \
uv_buf_t write_buffer; \
HANDLE event_handle; \
HANDLE wait_handle;
#define UV_CONNECT_PRIVATE_FIELDS \
/* empty */
#define UV_SHUTDOWN_PRIVATE_FIELDS \
/* empty */
#define UV_UDP_SEND_PRIVATE_FIELDS \
/* empty */
#define UV_PRIVATE_REQ_TYPES \
typedef struct uv_pipe_accept_s { \
UV_REQ_FIELDS \
HANDLE pipeHandle; \
struct uv_pipe_accept_s* next_pending; \
} uv_pipe_accept_t; \
\
typedef struct uv_tcp_accept_s { \
UV_REQ_FIELDS \
SOCKET accept_socket; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32]; \
HANDLE event_handle; \
HANDLE wait_handle; \
struct uv_tcp_accept_s* next_pending; \
} uv_tcp_accept_t; \
\
typedef struct uv_read_s { \
UV_REQ_FIELDS \
HANDLE event_handle; \
HANDLE wait_handle; \
} uv_read_t;
#define uv_stream_connection_fields \
unsigned int write_reqs_pending; \
uv_shutdown_t* shutdown_req;
#define uv_stream_server_fields \
uv_connection_cb connection_cb;
#define UV_STREAM_PRIVATE_FIELDS \
unsigned int reqs_pending; \
int activecnt; \
uv_read_t read_req; \
union { \
struct { uv_stream_connection_fields }; \
struct { uv_stream_server_fields }; \
};
#define uv_tcp_server_fields \
uv_tcp_accept_t* accept_reqs; \
unsigned int processed_accepts; \
uv_tcp_accept_t* pending_accepts; \
LPFN_ACCEPTEX func_acceptex;
#define uv_tcp_connection_fields \
uv_buf_t read_buffer; \
LPFN_CONNECTEX func_connectex;
#define UV_TCP_PRIVATE_FIELDS \
SOCKET socket; \
int delayed_error; \
union { \
struct { uv_tcp_server_fields }; \
struct { uv_tcp_connection_fields }; \
};
#define UV_UDP_PRIVATE_FIELDS \
SOCKET socket; \
unsigned int reqs_pending; \
int activecnt; \
uv_req_t recv_req; \
uv_buf_t recv_buffer; \
struct sockaddr_storage recv_from; \
int recv_from_len; \
uv_udp_recv_cb recv_cb; \
uv_alloc_cb alloc_cb; \
LPFN_WSARECV func_wsarecv; \
LPFN_WSARECVFROM func_wsarecvfrom;
#define uv_pipe_server_fields \
int pending_instances; \
uv_pipe_accept_t* accept_reqs; \
uv_pipe_accept_t* pending_accepts;
#define uv_pipe_connection_fields \
uv_timer_t* eof_timer; \
uv_write_t ipc_header_write_req; \
int ipc_pid; \
uint64_t remaining_ipc_rawdata_bytes; \
struct { \
void* queue[2]; \
int queue_len; \
} pending_ipc_info; \
uv_write_t* non_overlapped_writes_tail; \
uv_mutex_t readfile_mutex; \
volatile HANDLE readfile_thread; \
void* reserved;
#define UV_PIPE_PRIVATE_FIELDS \
HANDLE handle; \
WCHAR* name; \
union { \
struct { uv_pipe_server_fields }; \
struct { uv_pipe_connection_fields }; \
};
/* TODO: put the parser states in a union - TTY handles are always */
/* half-duplex so read-state can safely overlap write-state. */
#define UV_TTY_PRIVATE_FIELDS \
HANDLE handle; \
union { \
struct { \
/* Used for readable TTY handles */ \
HANDLE read_line_handle; \
uv_buf_t read_line_buffer; \
HANDLE read_raw_wait; \
/* Fields used for translating win keystrokes into vt100 characters */ \
char last_key[8]; \
unsigned char last_key_offset; \
unsigned char last_key_len; \
WCHAR last_utf16_high_surrogate; \
INPUT_RECORD last_input_record; \
}; \
struct { \
/* Used for writable TTY handles */ \
/* utf8-to-utf16 conversion state */ \
unsigned int utf8_codepoint; \
unsigned char utf8_bytes_left; \
/* eol conversion state */ \
unsigned char previous_eol; \
/* ansi parser state */ \
unsigned char ansi_parser_state; \
unsigned char ansi_csi_argc; \
unsigned short ansi_csi_argv[4]; \
COORD saved_position; \
WORD saved_attributes; \
}; \
};
#define UV_POLL_PRIVATE_FIELDS \
SOCKET socket; \
/* Used in fast mode */ \
SOCKET peer_socket; \
AFD_POLL_INFO afd_poll_info_1; \
AFD_POLL_INFO afd_poll_info_2; \
/* Used in fast and slow mode. */ \
uv_req_t poll_req_1; \
uv_req_t poll_req_2; \
unsigned char submitted_events_1; \
unsigned char submitted_events_2; \
unsigned char mask_events_1; \
unsigned char mask_events_2; \
unsigned char events;
#define UV_TIMER_PRIVATE_FIELDS \
RB_ENTRY(uv_timer_s) tree_entry; \
uint64_t due; \
uint64_t repeat; \
uint64_t start_id; \
uv_timer_cb timer_cb;
#define UV_ASYNC_PRIVATE_FIELDS \
struct uv_req_s async_req; \
uv_async_cb async_cb; \
/* char to avoid alignment issues */ \
char volatile async_sent;
#define UV_PREPARE_PRIVATE_FIELDS \
uv_prepare_t* prepare_prev; \
uv_prepare_t* prepare_next; \
uv_prepare_cb prepare_cb;
#define UV_CHECK_PRIVATE_FIELDS \
uv_check_t* check_prev; \
uv_check_t* check_next; \
uv_check_cb check_cb;
#define UV_IDLE_PRIVATE_FIELDS \
uv_idle_t* idle_prev; \
uv_idle_t* idle_next; \
uv_idle_cb idle_cb;
#define UV_HANDLE_PRIVATE_FIELDS \
uv_handle_t* endgame_next; \
unsigned int flags;
#define UV_GETADDRINFO_PRIVATE_FIELDS \
struct uv__work work_req; \
uv_getaddrinfo_cb getaddrinfo_cb; \
void* alloc; \
WCHAR* node; \
WCHAR* service; \
struct addrinfoW* hints; \
struct addrinfoW* res; \
int retcode;
#define UV_GETNAMEINFO_PRIVATE_FIELDS \
struct uv__work work_req; \
uv_getnameinfo_cb getnameinfo_cb; \
struct sockaddr_storage storage; \
int flags; \
char host[NI_MAXHOST]; \
char service[NI_MAXSERV]; \
int retcode;
#define UV_PROCESS_PRIVATE_FIELDS \
struct uv_process_exit_s { \
UV_REQ_FIELDS \
} exit_req; \
BYTE* child_stdio_buffer; \
int exit_signal; \
HANDLE wait_handle; \
HANDLE process_handle; \
volatile char exit_cb_pending;
#define UV_FS_PRIVATE_FIELDS \
struct uv__work work_req; \
int flags; \
DWORD sys_errno_; \
union { \
/* TODO: remove me in 0.9. */ \
WCHAR* pathw; \
int fd; \
}; \
union { \
struct { \
int mode; \
WCHAR* new_pathw; \
int file_flags; \
int fd_out; \
unsigned int nbufs; \
uv_buf_t* bufs; \
int64_t offset; \
uv_buf_t bufsml[4]; \
}; \
struct { \
double atime; \
double mtime; \
}; \
};
#define UV_WORK_PRIVATE_FIELDS \
struct uv__work work_req;
#define UV_FS_EVENT_PRIVATE_FIELDS \
struct uv_fs_event_req_s { \
UV_REQ_FIELDS \
} req; \
HANDLE dir_handle; \
int req_pending; \
uv_fs_event_cb cb; \
WCHAR* filew; \
WCHAR* short_filew; \
WCHAR* dirw; \
char* buffer;
#define UV_SIGNAL_PRIVATE_FIELDS \
RB_ENTRY(uv_signal_s) tree_entry; \
struct uv_req_s signal_req; \
unsigned long pending_signum;
int uv_utf16_to_utf8(const WCHAR* utf16Buffer, size_t utf16Size,
char* utf8Buffer, size_t utf8Size);
int uv_utf8_to_utf16(const char* utf8Buffer, WCHAR* utf16Buffer,
size_t utf16Size);

File diff suppressed because it is too large

View File

@ -0,0 +1,11 @@
prefix=@prefix@
exec_prefix=@prefix@
libdir=@libdir@
includedir=@includedir@
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
Description: multi-platform support library with a focus on asynchronous I/O.
Libs: -L${libdir} -luv @LIBS@
Cflags: -I${includedir}
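For orientation only: after configure substitutes the @...@ placeholders, the installed libuv.pc would read roughly as below. The paths and the libraries substituted for @LIBS@ are illustrative; the version shown simply matches uv-version.h from this same commit.
prefix=/usr/local
exec_prefix=/usr/local
libdir=/usr/local/lib
includedir=/usr/local/include
Name: libuv
Version: 0.11.29
Description: multi-platform support library with a focus on asynchronous I/O.
Libs: -L${libdir} -luv -lpthread
Cflags: -I${includedir}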

2
outside/libuv_11/m4/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
# Ignore libtoolize-generated files.
*.m4

22
outside/libuv_11/samples/.gitignore vendored Normal file
View File

@ -0,0 +1,22 @@
# Copyright StrongLoop, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
*.mk
*.Makefile

View File

@ -0,0 +1,21 @@
# Copyright StrongLoop, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
/build/

View File

@ -0,0 +1,53 @@
Files: *
========
Copyright StrongLoop, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
Files: getopt.c
===============
Copyright (c) 1987, 1993, 1994
The Regents of the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.

View File

@ -0,0 +1,46 @@
# Copyright StrongLoop, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
{
'targets': [
{
'dependencies': ['../../uv.gyp:libuv'],
'target_name': 's5-proxy',
'type': 'executable',
'sources': [
'client.c',
'defs.h',
'main.c',
's5.c',
's5.h',
'server.c',
'util.c',
],
'conditions': [
['OS=="win"', {
'defines': ['HAVE_UNISTD_H=0'],
'sources': ['getopt.c']
}, {
'defines': ['HAVE_UNISTD_H=1']
}]
]
}
]
}

View File

@ -0,0 +1,737 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "defs.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
/* A connection is modeled as an abstraction on top of two simple state
* machines, one for reading and one for writing. Either state machine
* is, when active, in one of three states: busy, done or stop; the fourth
* and final state, dead, is an end state and only relevant when shutting
* down the connection. A short overview:
*
*                          busy                  done           stop
* ----------|---------------------------|--------------------|------|
* readable  | waiting for incoming data | have incoming data | idle |
* writable  | busy writing out data     | completed write    | idle |
*
* We could remove the done state from the writable state machine. For our
* purposes, it's functionally equivalent to the stop state.
*
* When the connection with upstream has been established, the client_ctx
* moves into a state where incoming data from the client is sent upstream
* and vice versa, incoming data from upstream is sent to the client. In
* other words, we're just piping data back and forth. See conn_cycle()
* for details.
*
* An interesting deviation from libuv's I/O model is that reads are discrete
* rather than continuous events. In layman's terms, when a read operation
* completes, the connection stops reading until further notice.
*
* The rationale for this approach is that we have to wait until the data
* has been sent out again before we can reuse the read buffer.
*
* It also pleasingly unifies with the request model that libuv uses for
* writes and everything else; libuv may switch to a request model for
* reads in the future.
*/
enum conn_state {
c_busy, /* Busy; waiting for incoming data or for a write to complete. */
c_done, /* Done; read incoming data or write finished. */
c_stop, /* Stopped. */
c_dead
};
/* Session states. */
enum sess_state {
s_handshake, /* Wait for client handshake. */
s_handshake_auth, /* Wait for client authentication data. */
s_req_start, /* Start waiting for request data. */
s_req_parse, /* Wait for request data. */
s_req_lookup, /* Wait for upstream hostname DNS lookup to complete. */
s_req_connect, /* Wait for uv_tcp_connect() to complete. */
s_proxy_start, /* Connected. Start piping data. */
s_proxy, /* Connected. Pipe data back and forth. */
s_kill, /* Tear down session. */
s_almost_dead_0, /* Waiting for finalizers to complete. */
s_almost_dead_1, /* Waiting for finalizers to complete. */
s_almost_dead_2, /* Waiting for finalizers to complete. */
s_almost_dead_3, /* Waiting for finalizers to complete. */
s_almost_dead_4, /* Waiting for finalizers to complete. */
s_dead /* Dead. Safe to free now. */
};
static void do_next(client_ctx *cx);
static int do_handshake(client_ctx *cx);
static int do_handshake_auth(client_ctx *cx);
static int do_req_start(client_ctx *cx);
static int do_req_parse(client_ctx *cx);
static int do_req_lookup(client_ctx *cx);
static int do_req_connect_start(client_ctx *cx);
static int do_req_connect(client_ctx *cx);
static int do_proxy_start(client_ctx *cx);
static int do_proxy(client_ctx *cx);
static int do_kill(client_ctx *cx);
static int do_almost_dead(client_ctx *cx);
static int conn_cycle(const char *who, conn *a, conn *b);
static void conn_timer_reset(conn *c);
static void conn_timer_expire(uv_timer_t *handle, int status);
static void conn_getaddrinfo(conn *c, const char *hostname);
static void conn_getaddrinfo_done(uv_getaddrinfo_t *req,
int status,
struct addrinfo *ai);
static int conn_connect(conn *c);
static void conn_connect_done(uv_connect_t *req, int status);
static void conn_read(conn *c);
static void conn_read_done(uv_stream_t *handle,
ssize_t nread,
const uv_buf_t *buf);
static void conn_alloc(uv_handle_t *handle, size_t size, uv_buf_t *buf);
static void conn_write(conn *c, const void *data, unsigned int len);
static void conn_write_done(uv_write_t *req, int status);
static void conn_close(conn *c);
static void conn_close_done(uv_handle_t *handle);
/* |incoming| has been initialized by server.c when this is called. */
void client_finish_init(server_ctx *sx, client_ctx *cx) {
conn *incoming;
conn *outgoing;
cx->sx = sx;
cx->state = s_handshake;
s5_init(&cx->parser);
incoming = &cx->incoming;
incoming->client = cx;
incoming->result = 0;
incoming->rdstate = c_stop;
incoming->wrstate = c_stop;
incoming->idle_timeout = sx->idle_timeout;
CHECK(0 == uv_timer_init(sx->loop, &incoming->timer_handle));
outgoing = &cx->outgoing;
outgoing->client = cx;
outgoing->result = 0;
outgoing->rdstate = c_stop;
outgoing->wrstate = c_stop;
outgoing->idle_timeout = sx->idle_timeout;
CHECK(0 == uv_tcp_init(cx->sx->loop, &outgoing->handle.tcp));
CHECK(0 == uv_timer_init(cx->sx->loop, &outgoing->timer_handle));
/* Wait for the initial packet. */
conn_read(incoming);
}
/* This is the core state machine that drives the client <-> upstream proxy.
* We move through the initial handshake and authentication steps first and
* end up (if all goes well) in the proxy state where we're just proxying
* data between the client and upstream.
*/
static void do_next(client_ctx *cx) {
int new_state;
ASSERT(cx->state != s_dead);
switch (cx->state) {
case s_handshake:
new_state = do_handshake(cx);
break;
case s_handshake_auth:
new_state = do_handshake_auth(cx);
break;
case s_req_start:
new_state = do_req_start(cx);
break;
case s_req_parse:
new_state = do_req_parse(cx);
break;
case s_req_lookup:
new_state = do_req_lookup(cx);
break;
case s_req_connect:
new_state = do_req_connect(cx);
break;
case s_proxy_start:
new_state = do_proxy_start(cx);
break;
case s_proxy:
new_state = do_proxy(cx);
break;
case s_kill:
new_state = do_kill(cx);
break;
case s_almost_dead_0:
case s_almost_dead_1:
case s_almost_dead_2:
case s_almost_dead_3:
case s_almost_dead_4:
new_state = do_almost_dead(cx);
break;
default:
UNREACHABLE();
}
cx->state = new_state;
if (cx->state == s_dead) {
if (DEBUG_CHECKS) {
memset(cx, -1, sizeof(*cx));
}
free(cx);
}
}
static int do_handshake(client_ctx *cx) {
unsigned int methods;
conn *incoming;
s5_ctx *parser;
uint8_t *data;
size_t size;
int err;
parser = &cx->parser;
incoming = &cx->incoming;
ASSERT(incoming->rdstate == c_done);
ASSERT(incoming->wrstate == c_stop);
incoming->rdstate = c_stop;
if (incoming->result < 0) {
pr_err("read error: %s", uv_strerror(incoming->result));
return do_kill(cx);
}
data = (uint8_t *) incoming->t.buf;
size = (size_t) incoming->result;
err = s5_parse(parser, &data, &size);
if (err == s5_ok) {
conn_read(incoming);
return s_handshake; /* Need more data. */
}
if (size != 0) {
/* Could allow a round-trip saving shortcut here if the requested auth
* method is S5_AUTH_NONE (provided unauthenticated traffic is allowed.)
* Requires client support however.
*/
pr_err("junk in handshake");
return do_kill(cx);
}
if (err != s5_auth_select) {
pr_err("handshake error: %s", s5_strerror(err));
return do_kill(cx);
}
methods = s5_auth_methods(parser);
if ((methods & S5_AUTH_NONE) && can_auth_none(cx->sx, cx)) {
s5_select_auth(parser, S5_AUTH_NONE);
conn_write(incoming, "\5\0", 2); /* No auth required. */
return s_req_start;
}
if ((methods & S5_AUTH_PASSWD) && can_auth_passwd(cx->sx, cx)) {
/* TODO(bnoordhuis) Implement username/password auth. */
}
conn_write(incoming, "\5\377", 2); /* No acceptable auth. */
return s_kill;
}
/* TODO(bnoordhuis) Implement username/password auth. */
static int do_handshake_auth(client_ctx *cx) {
UNREACHABLE();
return do_kill(cx);
}
static int do_req_start(client_ctx *cx) {
conn *incoming;
incoming = &cx->incoming;
ASSERT(incoming->rdstate == c_stop);
ASSERT(incoming->wrstate == c_done);
incoming->wrstate = c_stop;
if (incoming->result < 0) {
pr_err("write error: %s", uv_strerror(incoming->result));
return do_kill(cx);
}
conn_read(incoming);
return s_req_parse;
}
static int do_req_parse(client_ctx *cx) {
conn *incoming;
conn *outgoing;
s5_ctx *parser;
uint8_t *data;
size_t size;
int err;
parser = &cx->parser;
incoming = &cx->incoming;
outgoing = &cx->outgoing;
ASSERT(incoming->rdstate == c_done);
ASSERT(incoming->wrstate == c_stop);
ASSERT(outgoing->rdstate == c_stop);
ASSERT(outgoing->wrstate == c_stop);
incoming->rdstate = c_stop;
if (incoming->result < 0) {
pr_err("read error: %s", uv_strerror(incoming->result));
return do_kill(cx);
}
data = (uint8_t *) incoming->t.buf;
size = (size_t) incoming->result;
err = s5_parse(parser, &data, &size);
if (err == s5_ok) {
conn_read(incoming);
return s_req_parse; /* Need more data. */
}
if (size != 0) {
pr_err("junk in request %u", (unsigned) size);
return do_kill(cx);
}
if (err != s5_exec_cmd) {
pr_err("request error: %s", s5_strerror(err));
return do_kill(cx);
}
if (parser->cmd == s5_cmd_tcp_bind) {
/* Not supported but relatively straightforward to implement. */
pr_warn("BIND requests are not supported.");
return do_kill(cx);
}
if (parser->cmd == s5_cmd_udp_assoc) {
/* Not supported. Might be hard to implement because libuv has no
* functionality for detecting the MTU size which the RFC mandates.
*/
pr_warn("UDP ASSOC requests are not supported.");
return do_kill(cx);
}
ASSERT(parser->cmd == s5_cmd_tcp_connect);
if (parser->atyp == s5_atyp_host) {
conn_getaddrinfo(outgoing, (const char *) parser->daddr);
return s_req_lookup;
}
if (parser->atyp == s5_atyp_ipv4) {
memset(&outgoing->t.addr4, 0, sizeof(outgoing->t.addr4));
outgoing->t.addr4.sin_family = AF_INET;
outgoing->t.addr4.sin_port = htons(parser->dport);
memcpy(&outgoing->t.addr4.sin_addr,
parser->daddr,
sizeof(outgoing->t.addr4.sin_addr));
} else if (parser->atyp == s5_atyp_ipv6) {
memset(&outgoing->t.addr6, 0, sizeof(outgoing->t.addr6));
outgoing->t.addr6.sin6_family = AF_INET6;
outgoing->t.addr6.sin6_port = htons(parser->dport);
memcpy(&outgoing->t.addr6.sin6_addr,
parser->daddr,
sizeof(outgoing->t.addr6.sin6_addr));
} else {
UNREACHABLE();
}
return do_req_connect_start(cx);
}
static int do_req_lookup(client_ctx *cx) {
s5_ctx *parser;
conn *incoming;
conn *outgoing;
parser = &cx->parser;
incoming = &cx->incoming;
outgoing = &cx->outgoing;
ASSERT(incoming->rdstate == c_stop);
ASSERT(incoming->wrstate == c_stop);
ASSERT(outgoing->rdstate == c_stop);
ASSERT(outgoing->wrstate == c_stop);
if (outgoing->result < 0) {
/* TODO(bnoordhuis) Escape control characters in parser->daddr. */
pr_err("lookup error for \"%s\": %s",
parser->daddr,
uv_strerror(outgoing->result));
/* Send back a 'Host unreachable' reply. */
conn_write(incoming, "\5\4\0\1\0\0\0\0\0\0", 10);
return s_kill;
}
/* Don't make assumptions about the offset of sin_port/sin6_port. */
switch (outgoing->t.addr.sa_family) {
case AF_INET:
outgoing->t.addr4.sin_port = htons(parser->dport);
break;
case AF_INET6:
outgoing->t.addr6.sin6_port = htons(parser->dport);
break;
default:
UNREACHABLE();
}
return do_req_connect_start(cx);
}
/* Assumes that cx->outgoing.t.sa contains a valid AF_INET/AF_INET6 address. */
static int do_req_connect_start(client_ctx *cx) {
conn *incoming;
conn *outgoing;
int err;
incoming = &cx->incoming;
outgoing = &cx->outgoing;
ASSERT(incoming->rdstate == c_stop);
ASSERT(incoming->wrstate == c_stop);
ASSERT(outgoing->rdstate == c_stop);
ASSERT(outgoing->wrstate == c_stop);
if (!can_access(cx->sx, cx, &outgoing->t.addr)) {
pr_warn("connection not allowed by ruleset");
/* Send a 'Connection not allowed by ruleset' reply. */
conn_write(incoming, "\5\2\0\1\0\0\0\0\0\0", 10);
return s_kill;
}
err = conn_connect(outgoing);
if (err != 0) {
pr_err("connect error: %s\n", uv_strerror(err));
return do_kill(cx);
}
return s_req_connect;
}
static int do_req_connect(client_ctx *cx) {
const struct sockaddr_in6 *in6;
const struct sockaddr_in *in;
char addr_storage[sizeof(*in6)];
conn *incoming;
conn *outgoing;
uint8_t *buf;
int addrlen;
incoming = &cx->incoming;
outgoing = &cx->outgoing;
ASSERT(incoming->rdstate == c_stop);
ASSERT(incoming->wrstate == c_stop);
ASSERT(outgoing->rdstate == c_stop);
ASSERT(outgoing->wrstate == c_stop);
/* Build and send the reply. Not very pretty but gets the job done. */
buf = (uint8_t *) incoming->t.buf;
if (outgoing->result == 0) {
/* The RFC mandates that the SOCKS server must include the local port
* and address in the reply. So that's what we do.
*/
addrlen = sizeof(addr_storage);
CHECK(0 == uv_tcp_getsockname(&outgoing->handle.tcp,
(struct sockaddr *) addr_storage,
&addrlen));
buf[0] = 5; /* Version. */
buf[1] = 0; /* Success. */
buf[2] = 0; /* Reserved. */
if (addrlen == sizeof(*in)) {
buf[3] = 1; /* IPv4. */
in = (const struct sockaddr_in *) &addr_storage;
memcpy(buf + 4, &in->sin_addr, 4);
memcpy(buf + 8, &in->sin_port, 2);
conn_write(incoming, buf, 10);
} else if (addrlen == sizeof(*in6)) {
buf[3] = 4; /* IPv6. */
in6 = (const struct sockaddr_in6 *) &addr_storage;
memcpy(buf + 4, &in6->sin6_addr, 16);
memcpy(buf + 20, &in6->sin6_port, 2);
conn_write(incoming, buf, 22);
} else {
UNREACHABLE();
}
return s_proxy_start;
} else {
pr_err("upstream connection error: %s\n", uv_strerror(outgoing->result));
/* Send a 'Connection refused' reply. */
conn_write(incoming, "\5\5\0\1\0\0\0\0\0\0", 10);
return s_kill;
}
UNREACHABLE();
return s_kill;
}
static int do_proxy_start(client_ctx *cx) {
conn *incoming;
conn *outgoing;
incoming = &cx->incoming;
outgoing = &cx->outgoing;
ASSERT(incoming->rdstate == c_stop);
ASSERT(incoming->wrstate == c_done);
ASSERT(outgoing->rdstate == c_stop);
ASSERT(outgoing->wrstate == c_stop);
incoming->wrstate = c_stop;
if (incoming->result < 0) {
pr_err("write error: %s", uv_strerror(incoming->result));
return do_kill(cx);
}
conn_read(incoming);
conn_read(outgoing);
return s_proxy;
}
/* Proxy incoming data back and forth. */
static int do_proxy(client_ctx *cx) {
if (conn_cycle("client", &cx->incoming, &cx->outgoing)) {
return do_kill(cx);
}
if (conn_cycle("upstream", &cx->outgoing, &cx->incoming)) {
return do_kill(cx);
}
return s_proxy;
}
static int do_kill(client_ctx *cx) {
int new_state;
if (cx->state >= s_almost_dead_0) {
return cx->state;
}
/* Try to cancel the request. The callback still runs but if the
* cancellation succeeded, it gets called with status=UV_ECANCELED.
*/
new_state = s_almost_dead_1;
if (cx->state == s_req_lookup) {
new_state = s_almost_dead_0;
uv_cancel(&cx->outgoing.t.req);
}
conn_close(&cx->incoming);
conn_close(&cx->outgoing);
return new_state;
}
static int do_almost_dead(client_ctx *cx) {
ASSERT(cx->state >= s_almost_dead_0);
return cx->state + 1; /* Another finalizer completed. */
}
static int conn_cycle(const char *who, conn *a, conn *b) {
if (a->result < 0) {
if (a->result != UV_EOF) {
pr_err("%s error: %s", who, uv_strerror(a->result));
}
return -1;
}
if (b->result < 0) {
return -1;
}
if (a->wrstate == c_done) {
a->wrstate = c_stop;
}
/* The logic is as follows: read when we don't write and write when we don't
* read. That gives us back-pressure handling for free because if the peer
* sends data faster than we consume it, TCP congestion control kicks in.
*/
if (a->wrstate == c_stop) {
if (b->rdstate == c_stop) {
conn_read(b);
} else if (b->rdstate == c_done) {
conn_write(a, b->t.buf, b->result);
b->rdstate = c_stop; /* Triggers the call to conn_read() above. */
}
}
return 0;
}
static void conn_timer_reset(conn *c) {
CHECK(0 == uv_timer_start(&c->timer_handle,
conn_timer_expire,
c->idle_timeout,
0));
}
static void conn_timer_expire(uv_timer_t *handle, int status) {
conn *c;
CHECK(0 == status);
c = CONTAINER_OF(handle, conn, timer_handle);
c->result = UV_ETIMEDOUT;
do_next(c->client);
}
static void conn_getaddrinfo(conn *c, const char *hostname) {
struct addrinfo hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
CHECK(0 == uv_getaddrinfo(c->client->sx->loop,
&c->t.addrinfo_req,
conn_getaddrinfo_done,
hostname,
NULL,
&hints));
conn_timer_reset(c);
}
static void conn_getaddrinfo_done(uv_getaddrinfo_t *req,
int status,
struct addrinfo *ai) {
conn *c;
c = CONTAINER_OF(req, conn, t.addrinfo_req);
c->result = status;
if (status == 0) {
/* FIXME(bnoordhuis) Should try all addresses. */
if (ai->ai_family == AF_INET) {
c->t.addr4 = *(const struct sockaddr_in *) ai->ai_addr;
} else if (ai->ai_family == AF_INET6) {
c->t.addr6 = *(const struct sockaddr_in6 *) ai->ai_addr;
} else {
UNREACHABLE();
}
}
uv_freeaddrinfo(ai);
do_next(c->client);
}
/* Assumes that c->t.sa contains a valid AF_INET or AF_INET6 address. */
static int conn_connect(conn *c) {
ASSERT(c->t.addr.sa_family == AF_INET ||
c->t.addr.sa_family == AF_INET6);
conn_timer_reset(c);
return uv_tcp_connect(&c->t.connect_req,
&c->handle.tcp,
&c->t.addr,
conn_connect_done);
}
static void conn_connect_done(uv_connect_t *req, int status) {
conn *c;
if (status == UV_ECANCELED) {
return; /* Handle has been closed. */
}
c = CONTAINER_OF(req, conn, t.connect_req);
c->result = status;
do_next(c->client);
}
static void conn_read(conn *c) {
ASSERT(c->rdstate == c_stop);
CHECK(0 == uv_read_start(&c->handle.stream, conn_alloc, conn_read_done));
c->rdstate = c_busy;
conn_timer_reset(c);
}
static void conn_read_done(uv_stream_t *handle,
ssize_t nread,
const uv_buf_t *buf) {
conn *c;
c = CONTAINER_OF(handle, conn, handle);
ASSERT(c->t.buf == buf->base);
ASSERT(c->rdstate == c_busy);
c->rdstate = c_done;
c->result = nread;
uv_read_stop(&c->handle.stream);
do_next(c->client);
}
static void conn_alloc(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
conn *c;
c = CONTAINER_OF(handle, conn, handle);
ASSERT(c->rdstate == c_busy);
buf->base = c->t.buf;
buf->len = sizeof(c->t.buf);
}
static void conn_write(conn *c, const void *data, unsigned int len) {
uv_buf_t buf;
ASSERT(c->wrstate == c_stop || c->wrstate == c_done);
c->wrstate = c_busy;
/* It's okay to cast away constness here, uv_write() won't modify the
* memory.
*/
buf.base = (char *) data;
buf.len = len;
CHECK(0 == uv_write(&c->write_req,
&c->handle.stream,
&buf,
1,
conn_write_done));
conn_timer_reset(c);
}
static void conn_write_done(uv_write_t *req, int status) {
conn *c;
if (status == UV_ECANCELED) {
return; /* Handle has been closed. */
}
c = CONTAINER_OF(req, conn, write_req);
ASSERT(c->wrstate == c_busy);
c->wrstate = c_done;
c->result = status;
do_next(c->client);
}
static void conn_close(conn *c) {
ASSERT(c->rdstate != c_dead);
ASSERT(c->wrstate != c_dead);
c->rdstate = c_dead;
c->wrstate = c_dead;
c->timer_handle.data = c;
c->handle.handle.data = c;
uv_close(&c->handle.handle, conn_close_done);
uv_close((uv_handle_t *) &c->timer_handle, conn_close_done);
}
static void conn_close_done(uv_handle_t *handle) {
conn *c;
c = handle->data;
do_next(c->client);
}

View File

@ -0,0 +1,139 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef DEFS_H_
#define DEFS_H_
#include "s5.h"
#include "uv.h"
#include <assert.h>
#include <netinet/in.h> /* sockaddr_in, sockaddr_in6 */
#include <stddef.h> /* size_t, ssize_t */
#include <stdint.h>
#include <sys/socket.h> /* sockaddr */
struct client_ctx;
typedef struct {
const char *bind_host;
unsigned short bind_port;
unsigned int idle_timeout;
} server_config;
typedef struct {
unsigned int idle_timeout; /* Connection idle timeout in ms. */
uv_tcp_t tcp_handle;
uv_loop_t *loop;
} server_ctx;
typedef struct {
unsigned char rdstate;
unsigned char wrstate;
unsigned int idle_timeout;
struct client_ctx *client; /* Backlink to owning client context. */
ssize_t result;
union {
uv_handle_t handle;
uv_stream_t stream;
uv_tcp_t tcp;
uv_udp_t udp;
} handle;
uv_timer_t timer_handle; /* For detecting timeouts. */
uv_write_t write_req;
/* We only need one of these at a time so make them share memory. */
union {
uv_getaddrinfo_t addrinfo_req;
uv_connect_t connect_req;
uv_req_t req;
struct sockaddr_in6 addr6;
struct sockaddr_in addr4;
struct sockaddr addr;
char buf[2048]; /* Scratch space. Used to read data into. */
} t;
} conn;
typedef struct client_ctx {
unsigned int state;
server_ctx *sx; /* Backlink to owning server context. */
s5_ctx parser; /* The SOCKS protocol parser. */
conn incoming; /* Connection with the SOCKS client. */
conn outgoing; /* Connection with upstream. */
} client_ctx;
/* server.c */
int server_run(const server_config *cf, uv_loop_t *loop);
int can_auth_none(const server_ctx *sx, const client_ctx *cx);
int can_auth_passwd(const server_ctx *sx, const client_ctx *cx);
int can_access(const server_ctx *sx,
const client_ctx *cx,
const struct sockaddr *addr);
/* client.c */
void client_finish_init(server_ctx *sx, client_ctx *cx);
/* util.c */
#if defined(__GNUC__)
# define ATTRIBUTE_FORMAT_PRINTF(a, b) __attribute__((format(printf, a, b)))
#else
# define ATTRIBUTE_FORMAT_PRINTF(a, b)
#endif
void pr_info(const char *fmt, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
void pr_warn(const char *fmt, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
void pr_err(const char *fmt, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
void *xmalloc(size_t size);
/* main.c */
const char *_getprogname(void);
/* getopt.c */
#if !HAVE_UNISTD_H
extern char *optarg;
int getopt(int argc, char **argv, const char *options);
#endif
/* ASSERT() is for debug checks, CHECK() for run-time sanity checks.
* DEBUG_CHECKS is for expensive debug checks that we only want to
* enable in debug builds but still want type-checked by the compiler
* in release builds.
*/
#if defined(NDEBUG)
# define ASSERT(exp)
# define CHECK(exp) do { if (!(exp)) abort(); } while (0)
# define DEBUG_CHECKS (0)
#else
# define ASSERT(exp) assert(exp)
# define CHECK(exp) assert(exp)
# define DEBUG_CHECKS (1)
#endif
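/* Editorial note: the "still type-checked in release builds" point above is
 * why DEBUG_CHECKS is an ordinary constant expression instead of an #ifdef;
 * client.c uses it as a normal branch that the compiler sees (and the
 * optimizer removes when it is 0):
 *   if (DEBUG_CHECKS) {
 *     memset(cx, -1, sizeof(*cx));
 *   }
 */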
#define UNREACHABLE() CHECK(!"Unreachable code reached.")
/* This macro looks complicated but it's not: it calculates the address
* of the embedding struct through the address of the embedded struct.
* In other words, if struct A embeds struct B, then we can obtain
* the address of A by taking the address of B and subtracting the
* field offset of B in A.
*/
#define CONTAINER_OF(ptr, type, field) \
((type *) ((char *) (ptr) - ((char *) &((type *) 0)->field)))
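/* Example (editorial, mirroring client.c): recover the embedding struct from
 * a pointer to one of its members, e.g. inside a libuv callback:
 *   conn* c = CONTAINER_OF(handle, conn, timer_handle);
 * which is how conn_timer_expire() and conn_write_done() get back to the
 * owning conn from the raw handle/req that libuv passes them. */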
#endif /* DEFS_H_ */

View File

@ -0,0 +1,131 @@
/* $NetBSD: getopt.c,v 1.26 2003/08/07 16:43:40 agc Exp $ */
/*
* Copyright (c) 1987, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)getopt.c 8.3 (Berkeley) 4/27/95";
#endif /* LIBC_SCCS and not lint */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
extern const char *_getprogname(void);
int opterr = 1, /* if error message should be printed */
optind = 1, /* index into parent argv vector */
optopt, /* character checked for validity */
optreset; /* reset getopt */
char *optarg; /* argument associated with option */
#define BADCH (int)'?'
#define BADARG (int)':'
#define EMSG ""
/*
* getopt --
* Parse argc/argv argument vector.
*/
int
getopt(nargc, nargv, ostr)
int nargc;
char * const nargv[];
const char *ostr;
{
static char *place = EMSG; /* option letter processing */
char *oli; /* option letter list index */
if (optreset || *place == 0) { /* update scanning pointer */
optreset = 0;
place = nargv[optind];
if (optind >= nargc || *place++ != '-') {
/* Argument is absent or is not an option */
place = EMSG;
return (-1);
}
optopt = *place++;
if (optopt == '-' && *place == 0) {
/* "--" => end of options */
++optind;
place = EMSG;
return (-1);
}
if (optopt == 0) {
/* Solitary '-', treat as a '-' option
if the program (eg su) is looking for it. */
place = EMSG;
if (strchr(ostr, '-') == NULL)
return (-1);
optopt = '-';
}
} else
optopt = *place++;
/* See if option letter is one the caller wanted... */
if (optopt == ':' || (oli = strchr(ostr, optopt)) == NULL) {
if (*place == 0)
++optind;
if (opterr && *ostr != ':')
(void)fprintf(stderr,
"%s: illegal option -- %c\n", _getprogname(),
optopt);
return (BADCH);
}
/* Does this option need an argument? */
if (oli[1] != ':') {
/* don't need argument */
optarg = NULL;
if (*place == 0)
++optind;
} else {
/* Option-argument is either the rest of this argument or the
entire next argument. */
if (*place)
optarg = place;
else if (nargc > ++optind)
optarg = nargv[optind];
else {
/* option-argument absent */
place = EMSG;
if (*ostr == ':')
return (BADARG);
if (opterr)
(void)fprintf(stderr,
"%s: option requires an argument -- %c\n",
_getprogname(), optopt);
return (BADCH);
}
place = EMSG;
++optind;
}
return (optopt); /* return option letter */
}

View File

@ -0,0 +1,99 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "defs.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h> /* getopt */
#endif
#define DEFAULT_BIND_HOST "127.0.0.1"
#define DEFAULT_BIND_PORT 1080
#define DEFAULT_IDLE_TIMEOUT (60 * 1000)
static void parse_opts(server_config *cf, int argc, char **argv);
static void usage(void);
static const char *progname = __FILE__; /* Reset in main(). */
int main(int argc, char **argv) {
server_config config;
int err;
progname = argv[0];
memset(&config, 0, sizeof(config));
config.bind_host = DEFAULT_BIND_HOST;
config.bind_port = DEFAULT_BIND_PORT;
config.idle_timeout = DEFAULT_IDLE_TIMEOUT;
parse_opts(&config, argc, argv);
err = server_run(&config, uv_default_loop());
if (err) {
exit(1);
}
return 0;
}
const char *_getprogname(void) {
return progname;
}
static void parse_opts(server_config *cf, int argc, char **argv) {
int opt;
while (-1 != (opt = getopt(argc, argv, "H:hp:"))) {
switch (opt) {
case 'H':
cf->bind_host = optarg;
break;
case 'p':
if (1 != sscanf(optarg, "%hu", &cf->bind_port)) {
pr_err("bad port number: %s", optarg);
usage();
}
break;
default:
usage();
}
}
}
static void usage(void) {
printf("Usage:\n"
"\n"
" %s [-H <address>] [-h] [-p <port>]\n"
"\n"
"Options:\n"
"\n"
" -H <hostname|address> Bind to this address or hostname.\n"
" Default: \"127.0.0.1\"\n"
" -h Show this help message.\n"
" -p <port> Bind to this port number. Default: 1080\n"
"",
progname);
exit(1);
}

View File

@ -0,0 +1,271 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "s5.h"
#include <errno.h>
#include <stdint.h>
#include <stdlib.h> /* abort() */
#include <string.h> /* memset() */
enum {
s5_version,
s5_nmethods,
s5_methods,
s5_auth_pw_version,
s5_auth_pw_userlen,
s5_auth_pw_username,
s5_auth_pw_passlen,
s5_auth_pw_password,
s5_req_version,
s5_req_cmd,
s5_req_reserved,
s5_req_atyp,
s5_req_atyp_host,
s5_req_daddr,
s5_req_dport0,
s5_req_dport1,
s5_dead
};
void s5_init(s5_ctx *cx) {
memset(cx, 0, sizeof(*cx));
cx->state = s5_version;
}
s5_err s5_parse(s5_ctx *cx, uint8_t **data, size_t *size) {
s5_err err;
uint8_t *p;
uint8_t c;
size_t i;
size_t n;
p = *data;
n = *size;
i = 0;
while (i < n) {
c = p[i];
i += 1;
switch (cx->state) {
case s5_version:
if (c != 5) {
err = s5_bad_version;
goto out;
}
cx->state = s5_nmethods;
break;
case s5_nmethods:
cx->arg0 = 0;
cx->arg1 = c; /* Number of bytes to read. */
cx->state = s5_methods;
break;
case s5_methods:
if (cx->arg0 < cx->arg1) {
switch (c) {
case 0:
cx->methods |= S5_AUTH_NONE;
break;
case 1:
cx->methods |= S5_AUTH_GSSAPI;
break;
case 2:
cx->methods |= S5_AUTH_PASSWD;
break;
/* Ignore everything we don't understand. */
}
cx->arg0 += 1;
}
if (cx->arg0 == cx->arg1) {
err = s5_auth_select;
goto out;
}
break;
case s5_auth_pw_version:
if (c != 1) {
err = s5_bad_version;
goto out;
}
cx->state = s5_auth_pw_userlen;
break;
case s5_auth_pw_userlen:
cx->arg0 = 0;
cx->userlen = c;
cx->state = s5_auth_pw_username;
break;
case s5_auth_pw_username:
if (cx->arg0 < cx->userlen) {
cx->username[cx->arg0] = c;
cx->arg0 += 1;
}
if (cx->arg0 == cx->userlen) {
cx->username[cx->userlen] = '\0';
cx->state = s5_auth_pw_passlen;
}
break;
case s5_auth_pw_passlen:
cx->arg0 = 0;
cx->passlen = c;
cx->state = s5_auth_pw_password;
break;
case s5_auth_pw_password:
if (cx->arg0 < cx->passlen) {
cx->password[cx->arg0] = c;
cx->arg0 += 1;
}
if (cx->arg0 == cx->passlen) {
cx->password[cx->passlen] = '\0';
cx->state = s5_req_version;
err = s5_auth_verify;
goto out;
}
break;
case s5_req_version:
if (c != 5) {
err = s5_bad_version;
goto out;
}
cx->state = s5_req_cmd;
break;
case s5_req_cmd:
switch (c) {
case 1: /* TCP connect */
cx->cmd = s5_cmd_tcp_connect;
break;
case 3: /* UDP associate */
cx->cmd = s5_cmd_udp_assoc;
break;
default:
err = s5_bad_cmd;
goto out;
}
cx->state = s5_req_reserved;
break;
case s5_req_reserved:
cx->state = s5_req_atyp;
break;
case s5_req_atyp:
cx->arg0 = 0;
switch (c) {
case 1: /* IPv4, four octets. */
cx->state = s5_req_daddr;
cx->atyp = s5_atyp_ipv4;
cx->arg1 = 4;
break;
case 3: /* Hostname. First byte is length. */
cx->state = s5_req_atyp_host;
cx->atyp = s5_atyp_host;
cx->arg1 = 0;
break;
case 4: /* IPv6, sixteen octets. */
cx->state = s5_req_daddr;
cx->atyp = s5_atyp_ipv6;
cx->arg1 = 16;
break;
default:
err = s5_bad_atyp;
goto out;
}
break;
case s5_req_atyp_host:
cx->arg1 = c;
cx->state = s5_req_daddr;
break;
case s5_req_daddr:
if (cx->arg0 < cx->arg1) {
cx->daddr[cx->arg0] = c;
cx->arg0 += 1;
}
if (cx->arg0 == cx->arg1) {
cx->daddr[cx->arg1] = '\0';
cx->state = s5_req_dport0;
}
break;
case s5_req_dport0:
cx->dport = c << 8;
cx->state = s5_req_dport1;
break;
case s5_req_dport1:
cx->dport |= c;
cx->state = s5_dead;
err = s5_exec_cmd;
goto out;
case s5_dead:
break;
default:
abort();
}
}
err = s5_ok;
out:
*data = p + i;
*size = n - i;
return err;
}
unsigned int s5_auth_methods(const s5_ctx *cx) {
return cx->methods;
}
int s5_select_auth(s5_ctx *cx, s5_auth_method method) {
int err;
err = 0;
switch (method) {
case S5_AUTH_NONE:
cx->state = s5_req_version;
break;
case S5_AUTH_PASSWD:
cx->state = s5_auth_pw_version;
break;
default:
err = -EINVAL;
}
return err;
}
const char *s5_strerror(s5_err err) {
#define S5_ERR_GEN(_, name, errmsg) case s5_ ## name: return errmsg;
switch (err) {
S5_ERR_MAP(S5_ERR_GEN)
default: ; /* Silence s5_max_errors -Wswitch warning. */
}
#undef S5_ERR_GEN
return "Unknown error.";
}

View File

@ -0,0 +1,94 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef S5_H_
#define S5_H_
#include <stddef.h>
#include <stdint.h>
#define S5_ERR_MAP(V) \
V(-1, bad_version, "Bad protocol version.") \
V(-2, bad_cmd, "Bad protocol command.") \
V(-3, bad_atyp, "Bad address type.") \
V(0, ok, "No error.") \
V(1, auth_select, "Select authentication method.") \
V(2, auth_verify, "Verify authentication.") \
  V(3, exec_cmd, "Execute command.")
typedef enum {
#define S5_ERR_GEN(code, name, _) s5_ ## name = code,
S5_ERR_MAP(S5_ERR_GEN)
#undef S5_ERR_GEN
s5_max_errors
} s5_err;
typedef enum {
S5_AUTH_NONE = 1 << 0,
S5_AUTH_GSSAPI = 1 << 1,
S5_AUTH_PASSWD = 1 << 2
} s5_auth_method;
typedef enum {
s5_auth_allow,
s5_auth_deny
} s5_auth_result;
typedef enum {
s5_atyp_ipv4,
s5_atyp_ipv6,
s5_atyp_host
} s5_atyp;
typedef enum {
s5_cmd_tcp_connect,
s5_cmd_tcp_bind,
s5_cmd_udp_assoc
} s5_cmd;
typedef struct {
uint32_t arg0; /* Scratch space for the state machine. */
uint32_t arg1; /* Scratch space for the state machine. */
uint8_t state;
uint8_t methods;
uint8_t cmd;
uint8_t atyp;
uint8_t userlen;
uint8_t passlen;
uint16_t dport;
uint8_t username[257];
uint8_t password[257];
uint8_t daddr[257]; /* TODO(bnoordhuis) Merge with username/password. */
} s5_ctx;
void s5_init(s5_ctx *ctx);
s5_err s5_parse(s5_ctx *cx, uint8_t **data, size_t *size);
/* Only call after s5_parse() has returned s5_auth_select. */
unsigned int s5_auth_methods(const s5_ctx *cx);
/* Call after s5_parse() has returned s5_auth_select. */
int s5_select_auth(s5_ctx *cx, s5_auth_method method);
const char *s5_strerror(s5_err err);
#endif /* S5_H_ */
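
A minimal caller sketch for the parser API above, assuming a context that has already been through s5_init() and a buffer just read from the client. The handler comments and the use of pr_err() (from util.c later in this commit) are illustrative, not prescribed by libuv:

/* Illustrative only: feed one received buffer through the state machine. */
static void on_client_data(s5_ctx *parser, uint8_t *buf, size_t nread) {
  uint8_t *data = buf;
  size_t size = nread;
  s5_err err = s5_parse(parser, &data, &size);

  switch (err) {
  case s5_ok:          /* Everything consumed; wait for more bytes. */
    break;
  case s5_auth_select: /* Pick a method with s5_select_auth(), answer client. */
    break;
  case s5_auth_verify: /* Inspect parser->username / parser->password. */
    break;
  case s5_exec_cmd:    /* parser->cmd, parser->atyp, parser->daddr and
                        * parser->dport now describe the request. */
    break;
  default:
    pr_err("s5_parse: %s", s5_strerror(err));
  }
  /* data and size now describe whatever trailing bytes were not consumed. */
}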

View File

@ -0,0 +1,241 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "defs.h"
#include <netinet/in.h> /* INET6_ADDRSTRLEN */
#include <stdlib.h>
#include <string.h>
#ifndef INET6_ADDRSTRLEN
# define INET6_ADDRSTRLEN 63
#endif
typedef struct {
uv_getaddrinfo_t getaddrinfo_req;
server_config config;
server_ctx *servers;
uv_loop_t *loop;
} server_state;
static void do_bind(uv_getaddrinfo_t *req, int status, struct addrinfo *ai);
static void on_connection(uv_stream_t *server, int status);
int server_run(const server_config *cf, uv_loop_t *loop) {
struct addrinfo hints;
server_state state;
int err;
memset(&state, 0, sizeof(state));
state.servers = NULL;
state.config = *cf;
state.loop = loop;
/* Resolve the address of the interface that we should bind to.
* The getaddrinfo callback starts the server and everything else.
*/
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
err = uv_getaddrinfo(loop,
&state.getaddrinfo_req,
do_bind,
cf->bind_host,
NULL,
&hints);
if (err != 0) {
pr_err("getaddrinfo: %s", uv_strerror(err));
return err;
}
/* Start the event loop. Control continues in do_bind(). */
if (uv_run(loop, UV_RUN_DEFAULT)) {
abort();
}
/* Please Valgrind. */
uv_loop_delete(loop);
free(state.servers);
return 0;
}
/* Bind a server to each address that getaddrinfo() reported. */
static void do_bind(uv_getaddrinfo_t *req, int status, struct addrinfo *addrs) {
char addrbuf[INET6_ADDRSTRLEN + 1];
unsigned int ipv4_naddrs;
unsigned int ipv6_naddrs;
server_state *state;
server_config *cf;
struct addrinfo *ai;
const void *addrv;
const char *what;
uv_loop_t *loop;
server_ctx *sx;
unsigned int n;
int err;
union {
struct sockaddr addr;
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
} s;
state = CONTAINER_OF(req, server_state, getaddrinfo_req);
loop = state->loop;
cf = &state->config;
if (status < 0) {
pr_err("getaddrinfo(\"%s\"): %s", cf->bind_host, uv_strerror(status));
uv_freeaddrinfo(addrs);
return;
}
ipv4_naddrs = 0;
ipv6_naddrs = 0;
for (ai = addrs; ai != NULL; ai = ai->ai_next) {
if (ai->ai_family == AF_INET) {
ipv4_naddrs += 1;
} else if (ai->ai_family == AF_INET6) {
ipv6_naddrs += 1;
}
}
if (ipv4_naddrs == 0 && ipv6_naddrs == 0) {
pr_err("%s has no IPv4/6 addresses", cf->bind_host);
uv_freeaddrinfo(addrs);
return;
}
state->servers =
xmalloc((ipv4_naddrs + ipv6_naddrs) * sizeof(state->servers[0]));
n = 0;
for (ai = addrs; ai != NULL; ai = ai->ai_next) {
if (ai->ai_family != AF_INET && ai->ai_family != AF_INET6) {
continue;
}
if (ai->ai_family == AF_INET) {
s.addr4 = *(const struct sockaddr_in *) ai->ai_addr;
s.addr4.sin_port = htons(cf->bind_port);
addrv = &s.addr4.sin_addr;
} else if (ai->ai_family == AF_INET6) {
s.addr6 = *(const struct sockaddr_in6 *) ai->ai_addr;
s.addr6.sin6_port = htons(cf->bind_port);
addrv = &s.addr6.sin6_addr;
} else {
UNREACHABLE();
}
if (uv_inet_ntop(s.addr.sa_family, addrv, addrbuf, sizeof(addrbuf))) {
UNREACHABLE();
}
sx = state->servers + n;
sx->loop = loop;
sx->idle_timeout = state->config.idle_timeout;
CHECK(0 == uv_tcp_init(loop, &sx->tcp_handle));
what = "uv_tcp_bind";
err = uv_tcp_bind(&sx->tcp_handle, &s.addr, 0);
if (err == 0) {
what = "uv_listen";
err = uv_listen((uv_stream_t *) &sx->tcp_handle, 128, on_connection);
}
if (err != 0) {
pr_err("%s(\"%s:%hu\"): %s",
what,
addrbuf,
cf->bind_port,
uv_strerror(err));
while (n > 0) {
n -= 1;
uv_close((uv_handle_t *) (state->servers + n), NULL);
}
break;
}
pr_info("listening on %s:%hu", addrbuf, cf->bind_port);
n += 1;
}
uv_freeaddrinfo(addrs);
}
static void on_connection(uv_stream_t *server, int status) {
server_ctx *sx;
client_ctx *cx;
CHECK(status == 0);
sx = CONTAINER_OF(server, server_ctx, tcp_handle);
cx = xmalloc(sizeof(*cx));
CHECK(0 == uv_tcp_init(sx->loop, &cx->incoming.handle.tcp));
CHECK(0 == uv_accept(server, &cx->incoming.handle.stream));
client_finish_init(sx, cx);
}
int can_auth_none(const server_ctx *sx, const client_ctx *cx) {
return 1;
}
int can_auth_passwd(const server_ctx *sx, const client_ctx *cx) {
return 0;
}
int can_access(const server_ctx *sx,
const client_ctx *cx,
const struct sockaddr *addr) {
const struct sockaddr_in6 *addr6;
const struct sockaddr_in *addr4;
const uint32_t *p;
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
/* TODO(bnoordhuis) Implement proper access checks. For now, just reject
* traffic to localhost.
*/
if (addr->sa_family == AF_INET) {
addr4 = (const struct sockaddr_in *) addr;
d = ntohl(addr4->sin_addr.s_addr);
return (d >> 24) != 0x7F;
}
if (addr->sa_family == AF_INET6) {
addr6 = (const struct sockaddr_in6 *) addr;
p = (const uint32_t *) &addr6->sin6_addr.s6_addr;
a = ntohl(p[0]);
b = ntohl(p[1]);
c = ntohl(p[2]);
d = ntohl(p[3]);
if (a == 0 && b == 0 && c == 0 && d == 1) {
return 0; /* "::1" style address. */
}
if (a == 0 && b == 0 && c == 0xFFFF && (d >> 24) == 0x7F) {
return 0; /* "::ffff:127.x.x.x" style address. */
}
return 1;
}
return 0;
}

View File

@ -0,0 +1,72 @@
/* Copyright StrongLoop, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "defs.h"
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
static void pr_do(FILE *stream,
const char *label,
const char *fmt,
va_list ap);
void *xmalloc(size_t size) {
void *ptr;
ptr = malloc(size);
if (ptr == NULL) {
pr_err("out of memory, need %lu bytes", (unsigned long) size);
exit(1);
}
return ptr;
}
void pr_info(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
pr_do(stdout, "info", fmt, ap);
va_end(ap);
}
void pr_warn(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
pr_do(stderr, "warn", fmt, ap);
va_end(ap);
}
void pr_err(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
pr_do(stderr, "error", fmt, ap);
va_end(ap);
}
static void pr_do(FILE *stream,
const char *label,
const char *fmt,
va_list ap) {
char fmtbuf[1024];
vsnprintf(fmtbuf, sizeof(fmtbuf), fmt, ap);
fprintf(stream, "%s:%s: %s\n", _getprogname(), label, fmtbuf);
}

View File

@ -0,0 +1,255 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
struct poll_ctx {
uv_fs_poll_t* parent_handle; /* NULL if parent has been stopped or closed */
int busy_polling;
unsigned int interval;
uint64_t start_time;
uv_loop_t* loop;
uv_fs_poll_cb poll_cb;
uv_timer_t timer_handle;
uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
uv_stat_t statbuf;
char path[1]; /* variable length */
};
static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b);
static void poll_cb(uv_fs_t* req);
static void timer_cb(uv_timer_t* timer);
static void timer_close_cb(uv_handle_t* handle);
static uv_stat_t zero_statbuf;
int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
return 0;
}
int uv_fs_poll_start(uv_fs_poll_t* handle,
uv_fs_poll_cb cb,
const char* path,
unsigned int interval) {
struct poll_ctx* ctx;
uv_loop_t* loop;
size_t len;
int err;
if (uv__is_active(handle))
return 0;
loop = handle->loop;
len = strlen(path);
ctx = calloc(1, sizeof(*ctx) + len);
if (ctx == NULL)
return UV_ENOMEM;
ctx->loop = loop;
ctx->poll_cb = cb;
ctx->interval = interval ? interval : 1;
ctx->start_time = uv_now(loop);
ctx->parent_handle = handle;
memcpy(ctx->path, path, len + 1);
err = uv_timer_init(loop, &ctx->timer_handle);
if (err < 0)
goto error;
ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
uv__handle_unref(&ctx->timer_handle);
err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb);
if (err < 0)
goto error;
handle->poll_ctx = ctx;
uv__handle_start(handle);
return 0;
error:
free(ctx);
return err;
}
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
struct poll_ctx* ctx;
if (!uv__is_active(handle))
return 0;
ctx = handle->poll_ctx;
assert(ctx != NULL);
assert(ctx->parent_handle != NULL);
ctx->parent_handle = NULL;
handle->poll_ctx = NULL;
/* Close the timer if it's active. If it's inactive, there's a stat request
* in progress and poll_cb will take care of the cleanup.
*/
if (uv__is_active(&ctx->timer_handle))
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv__handle_stop(handle);
return 0;
}
int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buf, size_t* len) {
struct poll_ctx* ctx;
size_t required_len;
if (!uv__is_active(handle)) {
*len = 0;
return UV_EINVAL;
}
ctx = handle->poll_ctx;
assert(ctx != NULL);
required_len = strlen(ctx->path) + 1;
if (required_len > *len) {
*len = required_len;
return UV_ENOBUFS;
}
memcpy(buf, ctx->path, required_len);
*len = required_len;
return 0;
}
void uv__fs_poll_close(uv_fs_poll_t* handle) {
uv_fs_poll_stop(handle);
}
static void timer_cb(uv_timer_t* timer) {
struct poll_ctx* ctx;
ctx = container_of(timer, struct poll_ctx, timer_handle);
assert(ctx->parent_handle != NULL);
assert(ctx->parent_handle->poll_ctx == ctx);
ctx->start_time = uv_now(ctx->loop);
if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
abort();
}
static void poll_cb(uv_fs_t* req) {
uv_stat_t* statbuf;
struct poll_ctx* ctx;
uint64_t interval;
ctx = container_of(req, struct poll_ctx, fs_req);
if (ctx->parent_handle == NULL) { /* handle has been stopped or closed */
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv_fs_req_cleanup(req);
return;
}
if (req->result != 0) {
if (ctx->busy_polling != req->result) {
ctx->poll_cb(ctx->parent_handle,
req->result,
&ctx->statbuf,
&zero_statbuf);
ctx->busy_polling = req->result;
}
goto out;
}
statbuf = &req->statbuf;
if (ctx->busy_polling != 0)
if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
ctx->statbuf = *statbuf;
ctx->busy_polling = 1;
out:
uv_fs_req_cleanup(req);
if (ctx->parent_handle == NULL) { /* handle has been stopped by callback */
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
return;
}
/* Reschedule timer, subtract the delay from doing the stat(). */
interval = ctx->interval;
interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;
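  /* Example (illustrative): with interval == 1000 and a stat() that took
   * 300 ms, uv_now() - start_time == 300, so the timer is armed for 700 ms
   * and callbacks stay on a fixed 1000 ms cadence. The modulo keeps the
   * delay positive even when a slow stat() overruns the interval.
   */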
if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
abort();
}
static void timer_close_cb(uv_handle_t* handle) {
free(container_of(handle, struct poll_ctx, timer_handle));
}
static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
return a->st_ctim.tv_nsec == b->st_ctim.tv_nsec
&& a->st_mtim.tv_nsec == b->st_mtim.tv_nsec
&& a->st_birthtim.tv_nsec == b->st_birthtim.tv_nsec
&& a->st_ctim.tv_sec == b->st_ctim.tv_sec
&& a->st_mtim.tv_sec == b->st_mtim.tv_sec
&& a->st_birthtim.tv_sec == b->st_birthtim.tv_sec
&& a->st_size == b->st_size
&& a->st_mode == b->st_mode
&& a->st_uid == b->st_uid
&& a->st_gid == b->st_gid
&& a->st_ino == b->st_ino
&& a->st_dev == b->st_dev
&& a->st_flags == b->st_flags
&& a->st_gen == b->st_gen;
}
#if defined(_WIN32)
#include "win/internal.h"
#include "win/handle-inl.h"
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
assert(handle->flags & UV__HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
#endif /* _WIN32 */
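
A hedged usage sketch of the fs-poll API implemented above; the watched path, the interval and the callback body are assumptions chosen for illustration:

#include <stdio.h>
#include "uv.h"

/* prev/curr are the stat results before and after the change; status is 0 on
 * success or a UV_E* error such as UV_ENOENT when the file disappears.
 */
static void on_change(uv_fs_poll_t* handle,
                      int status,
                      const uv_stat_t* prev,
                      const uv_stat_t* curr) {
  if (status == 0)
    printf("size changed from %llu to %llu\n",
           (unsigned long long) prev->st_size,
           (unsigned long long) curr->st_size);
}

int main(void) {
  uv_fs_poll_t poller;
  uv_fs_poll_init(uv_default_loop(), &poller);
  uv_fs_poll_start(&poller, on_change, "/tmp/watched-file", 2000);  /* every 2s */
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}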

View File

@ -0,0 +1,245 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SRC_HEAP_H_
#define UV_SRC_HEAP_H_
#include <stddef.h> /* NULL */
#if defined(__GNUC__)
# define HEAP_EXPORT(declaration) __attribute__((unused)) static declaration
#else
# define HEAP_EXPORT(declaration) static declaration
#endif
struct heap_node {
struct heap_node* left;
struct heap_node* right;
struct heap_node* parent;
};
/* A binary min heap. The usual properties hold: the root is the lowest
* element in the set, the height of the tree is at most log2(nodes) and
* it's always a complete binary tree.
*
* The heap functions try hard to detect corrupted tree nodes at the cost
* of a minor reduction in performance. Compile with -DNDEBUG to disable.
*/
struct heap {
struct heap_node* min;
unsigned int nelts;
};
/* Return non-zero if a < b. */
typedef int (*heap_compare_fn)(const struct heap_node* a,
const struct heap_node* b);
/* Public functions. */
HEAP_EXPORT(void heap_init(struct heap* heap));
HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap));
HEAP_EXPORT(void heap_insert(struct heap* heap,
struct heap_node* newnode,
heap_compare_fn less_than));
HEAP_EXPORT(void heap_remove(struct heap* heap,
struct heap_node* node,
heap_compare_fn less_than));
HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than));
/* Implementation follows. */
HEAP_EXPORT(void heap_init(struct heap* heap)) {
heap->min = NULL;
heap->nelts = 0;
}
HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap)) {
return heap->min;
}
/* Swap parent with child. Child moves closer to the root, parent moves away. */
static void heap_node_swap(struct heap* heap,
struct heap_node* parent,
struct heap_node* child) {
struct heap_node* sibling;
struct heap_node t;
t = *parent;
*parent = *child;
*child = t;
parent->parent = child;
if (child->left == child) {
child->left = parent;
sibling = child->right;
} else {
child->right = parent;
sibling = child->left;
}
if (sibling != NULL)
sibling->parent = child;
if (parent->left != NULL)
parent->left->parent = parent;
if (parent->right != NULL)
parent->right->parent = parent;
if (child->parent == NULL)
heap->min = child;
else if (child->parent->left == parent)
child->parent->left = child;
else
child->parent->right = child;
}
HEAP_EXPORT(void heap_insert(struct heap* heap,
struct heap_node* newnode,
heap_compare_fn less_than)) {
struct heap_node** parent;
struct heap_node** child;
unsigned int path;
unsigned int n;
unsigned int k;
newnode->left = NULL;
newnode->right = NULL;
newnode->parent = NULL;
/* Calculate the path from the root to the insertion point. This is a min
* heap so we always insert at the left-most free node of the bottom row.
*/
path = 0;
for (k = 0, n = 1 + heap->nelts; n >= 2; k += 1, n /= 2)
path = (path << 1) | (n & 1);
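  /* Example (illustrative): with nelts == 5 the new node becomes element 6.
   * n walks 6 -> 3 -> 1 and records the low bits 0 then 1, giving path == 1
   * and k == 2, so the traversal below goes right from the root, then left:
   * exactly the left-most free slot of the bottom row.
   */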
/* Now traverse the heap using the path we calculated in the previous step. */
parent = child = &heap->min;
while (k > 0) {
parent = child;
if (path & 1)
child = &(*child)->right;
else
child = &(*child)->left;
path >>= 1;
k -= 1;
}
/* Insert the new node. */
newnode->parent = *parent;
*child = newnode;
heap->nelts += 1;
/* Walk up the tree and check at each node if the heap property holds.
* It's a min heap so parent < child must be true.
*/
while (newnode->parent != NULL && less_than(newnode, newnode->parent))
heap_node_swap(heap, newnode->parent, newnode);
}
HEAP_EXPORT(void heap_remove(struct heap* heap,
struct heap_node* node,
heap_compare_fn less_than)) {
struct heap_node* smallest;
struct heap_node** max;
struct heap_node* child;
unsigned int path;
unsigned int k;
unsigned int n;
if (heap->nelts == 0)
return;
/* Calculate the path from the min (the root) to the max, the left-most node
* of the bottom row.
*/
path = 0;
for (k = 0, n = heap->nelts; n >= 2; k += 1, n /= 2)
path = (path << 1) | (n & 1);
/* Now traverse the heap using the path we calculated in the previous step. */
max = &heap->min;
while (k > 0) {
if (path & 1)
max = &(*max)->right;
else
max = &(*max)->left;
path >>= 1;
k -= 1;
}
heap->nelts -= 1;
/* Unlink the max node. */
child = *max;
*max = NULL;
if (child == node) {
/* We're removing either the max or the last node in the tree. */
if (child == heap->min) {
heap->min = NULL;
}
return;
}
/* Replace the to be deleted node with the max node. */
child->left = node->left;
child->right = node->right;
child->parent = node->parent;
if (child->left != NULL) {
child->left->parent = child;
}
if (child->right != NULL) {
child->right->parent = child;
}
if (node->parent == NULL) {
heap->min = child;
} else if (node->parent->left == node) {
node->parent->left = child;
} else {
node->parent->right = child;
}
/* Walk down the subtree and check at each node if the heap property holds.
* It's a min heap so parent < child must be true. If the parent is bigger,
* swap it with the smallest child.
*/
for (;;) {
smallest = child;
if (child->left != NULL && less_than(child->left, smallest))
smallest = child->left;
if (child->right != NULL && less_than(child->right, smallest))
smallest = child->right;
if (smallest == child)
break;
heap_node_swap(heap, child, smallest);
}
/* Walk up the subtree and check that each parent is less than the node.
 * This is required because the `max` node is not guaranteed to be the
 * actual maximum in the tree.
 */
while (child->parent != NULL && less_than(child, child->parent))
heap_node_swap(heap, child->parent, child);
}
HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)) {
heap_remove(heap, heap->min, less_than);
}
#undef HEAP_EXPORT
#endif /* UV_SRC_HEAP_H_ */
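
A small, self-contained sketch of how this header is typically used. The `item` wrapper struct, its comparator and the include name "heap-inl.h" are illustrative assumptions; libuv itself embeds heap_node in its timer handles the same way:

#include <stdio.h>
#include <stddef.h>
#include "heap-inl.h"

struct item {
  int value;
  struct heap_node node;  /* Intrusive link; the heap never allocates. */
};

static int item_less_than(const struct heap_node* a, const struct heap_node* b) {
  const struct item* x = (const struct item*) ((const char*) a - offsetof(struct item, node));
  const struct item* y = (const struct item*) ((const char*) b - offsetof(struct item, node));
  return x->value < y->value;
}

int main(void) {
  struct heap h;
  struct item items[3] = { { 42 }, { 7 }, { 19 } };
  struct item* min;
  int i;

  heap_init(&h);
  for (i = 0; i < 3; i++)
    heap_insert(&h, &items[i].node, item_less_than);

  /* The root is the smallest element: prints 7. */
  min = (struct item*) ((char*) heap_min(&h) - offsetof(struct item, node));
  printf("%d\n", min->value);

  heap_dequeue(&h, item_less_than);  /* Removes the minimum; 19 is next. */
  return 0;
}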

313
outside/libuv_11/src/inet.c Normal file
View File

@ -0,0 +1,313 @@
/*
* Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
* Copyright (c) 1996-1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <stdio.h>
#include <string.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv.h"
#include "uv-common.h"
#define UV__INET_ADDRSTRLEN 16
#define UV__INET6_ADDRSTRLEN 46
static int inet_ntop4(const unsigned char *src, char *dst, size_t size);
static int inet_ntop6(const unsigned char *src, char *dst, size_t size);
static int inet_pton4(const char *src, unsigned char *dst);
static int inet_pton6(const char *src, unsigned char *dst);
int uv_inet_ntop(int af, const void* src, char* dst, size_t size) {
switch (af) {
case AF_INET:
return (inet_ntop4(src, dst, size));
case AF_INET6:
return (inet_ntop6(src, dst, size));
default:
return UV_EAFNOSUPPORT;
}
/* NOTREACHED */
}
static int inet_ntop4(const unsigned char *src, char *dst, size_t size) {
static const char fmt[] = "%u.%u.%u.%u";
char tmp[UV__INET_ADDRSTRLEN];
int l;
#ifndef _WIN32
l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
#else
l = _snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
#endif
if (l <= 0 || (size_t) l >= size) {
return UV_ENOSPC;
}
strncpy(dst, tmp, size);
dst[size - 1] = '\0';
return 0;
}
static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
/*
* Note that int32_t and int16_t need only be "at least" large enough
* to contain a value of the specified size. On some systems, like
* Crays, there is no such thing as an integer variable with 16 bits.
* Keep this in mind if you think this function should have been coded
* to use pointer overlays. All the world's not a VAX.
*/
char tmp[UV__INET6_ADDRSTRLEN], *tp;
struct { int base, len; } best, cur;
unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)];
int i;
/*
* Preprocess:
* Copy the input (bytewise) array into a wordwise array.
* Find the longest run of 0x00's in src[] for :: shorthanding.
*/
memset(words, '\0', sizeof words);
for (i = 0; i < (int) sizeof(struct in6_addr); i++)
words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
best.base = -1;
best.len = 0;
cur.base = -1;
cur.len = 0;
for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
if (words[i] == 0) {
if (cur.base == -1)
cur.base = i, cur.len = 1;
else
cur.len++;
} else {
if (cur.base != -1) {
if (best.base == -1 || cur.len > best.len)
best = cur;
cur.base = -1;
}
}
}
if (cur.base != -1) {
if (best.base == -1 || cur.len > best.len)
best = cur;
}
if (best.base != -1 && best.len < 2)
best.base = -1;
/*
* Format the result.
*/
tp = tmp;
for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
/* Are we inside the best run of 0x00's? */
if (best.base != -1 && i >= best.base &&
i < (best.base + best.len)) {
if (i == best.base)
*tp++ = ':';
continue;
}
/* Are we following an initial run of 0x00s or any real hex? */
if (i != 0)
*tp++ = ':';
/* Is this address an encapsulated IPv4? */
if (i == 6 && best.base == 0 && (best.len == 6 ||
(best.len == 7 && words[7] != 0x0001) ||
(best.len == 5 && words[5] == 0xffff))) {
int err = inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp));
if (err)
return err;
tp += strlen(tp);
break;
}
tp += sprintf(tp, "%x", words[i]);
}
/* Was it a trailing run of 0x00's? */
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
*tp++ = ':';
*tp++ = '\0';
/*
* Check for overflow, copy, and we're done.
*/
if ((size_t)(tp - tmp) > size) {
return UV_ENOSPC;
}
strcpy(dst, tmp);
return 0;
}
int uv_inet_pton(int af, const char* src, void* dst) {
if (src == NULL || dst == NULL)
return UV_EINVAL;
switch (af) {
case AF_INET:
return (inet_pton4(src, dst));
case AF_INET6: {
int len;
char tmp[UV__INET6_ADDRSTRLEN], *s, *p;
s = (char*) src;
p = strchr(src, '%');
if (p != NULL) {
s = tmp;
len = p - src;
if (len > UV__INET6_ADDRSTRLEN-1)
return UV_EINVAL;
memcpy(s, src, len);
s[len] = '\0';
}
return inet_pton6(s, dst);
}
default:
return UV_EAFNOSUPPORT;
}
/* NOTREACHED */
}
static int inet_pton4(const char *src, unsigned char *dst) {
static const char digits[] = "0123456789";
int saw_digit, octets, ch;
unsigned char tmp[sizeof(struct in_addr)], *tp;
saw_digit = 0;
octets = 0;
*(tp = tmp) = 0;
while ((ch = *src++) != '\0') {
const char *pch;
if ((pch = strchr(digits, ch)) != NULL) {
unsigned int nw = *tp * 10 + (pch - digits);
if (saw_digit && *tp == 0)
return UV_EINVAL;
if (nw > 255)
return UV_EINVAL;
*tp = nw;
if (!saw_digit) {
if (++octets > 4)
return UV_EINVAL;
saw_digit = 1;
}
} else if (ch == '.' && saw_digit) {
if (octets == 4)
return UV_EINVAL;
*++tp = 0;
saw_digit = 0;
} else
return UV_EINVAL;
}
if (octets < 4)
return UV_EINVAL;
memcpy(dst, tmp, sizeof(struct in_addr));
return 0;
}
static int inet_pton6(const char *src, unsigned char *dst) {
static const char xdigits_l[] = "0123456789abcdef",
xdigits_u[] = "0123456789ABCDEF";
unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp;
const char *xdigits, *curtok;
int ch, seen_xdigits;
unsigned int val;
memset((tp = tmp), '\0', sizeof tmp);
endp = tp + sizeof tmp;
colonp = NULL;
/* Leading :: requires some special handling. */
if (*src == ':')
if (*++src != ':')
return UV_EINVAL;
curtok = src;
seen_xdigits = 0;
val = 0;
while ((ch = *src++) != '\0') {
const char *pch;
if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
pch = strchr((xdigits = xdigits_u), ch);
if (pch != NULL) {
val <<= 4;
val |= (pch - xdigits);
if (++seen_xdigits > 4)
return UV_EINVAL;
continue;
}
if (ch == ':') {
curtok = src;
if (!seen_xdigits) {
if (colonp)
return UV_EINVAL;
colonp = tp;
continue;
} else if (*src == '\0') {
return UV_EINVAL;
}
if (tp + sizeof(uint16_t) > endp)
return UV_EINVAL;
*tp++ = (unsigned char) (val >> 8) & 0xff;
*tp++ = (unsigned char) val & 0xff;
seen_xdigits = 0;
val = 0;
continue;
}
if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) {
int err = inet_pton4(curtok, tp);
if (err == 0) {
tp += sizeof(struct in_addr);
seen_xdigits = 0;
break; /*%< '\\0' was seen by inet_pton4(). */
}
}
return UV_EINVAL;
}
if (seen_xdigits) {
if (tp + sizeof(uint16_t) > endp)
return UV_EINVAL;
*tp++ = (unsigned char) (val >> 8) & 0xff;
*tp++ = (unsigned char) val & 0xff;
}
if (colonp != NULL) {
/*
* Since some memmove()'s erroneously fail to handle
* overlapping regions, we'll do the shift by hand.
*/
const int n = tp - colonp;
int i;
if (tp == endp)
return UV_EINVAL;
for (i = 1; i <= n; i++) {
endp[- i] = colonp[n - i];
colonp[n - i] = 0;
}
tp = endp;
}
if (tp != endp)
return UV_EINVAL;
memcpy(dst, tmp, sizeof tmp);
return 0;
}
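
A short round-trip sketch using the two public entry points above; the address literal is chosen for illustration and the buffer size mirrors UV__INET6_ADDRSTRLEN:

#include <stdio.h>
#include <netinet/in.h>
#include "uv.h"

int main(void) {
  struct in6_addr addr6;
  char text[46];  /* Large enough for any IPv6 text form. */

  /* Parse, then format again; the longest zero run is compressed to "::". */
  if (uv_inet_pton(AF_INET6, "2001:db8:0:0:0:0:0:1", &addr6) == 0 &&
      uv_inet_ntop(AF_INET6, &addr6, text, sizeof(text)) == 0)
    printf("%s\n", text);  /* prints "2001:db8::1" */
  return 0;
}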

View File

@ -0,0 +1,92 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef QUEUE_H_
#define QUEUE_H_
typedef void *QUEUE[2];
/* Private macros. */
#define QUEUE_NEXT(q) (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q) (*(QUEUE **) &((*(q))[1]))
#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
/* Public macros. */
#define QUEUE_DATA(ptr, type, field) \
((type *) ((char *) (ptr) - ((char *) &((type *) 0)->field)))
#define QUEUE_FOREACH(q, h) \
for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
#define QUEUE_EMPTY(q) \
((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
#define QUEUE_HEAD(q) \
(QUEUE_NEXT(q))
#define QUEUE_INIT(q) \
do { \
QUEUE_NEXT(q) = (q); \
QUEUE_PREV(q) = (q); \
} \
while (0)
#define QUEUE_ADD(h, n) \
do { \
QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \
QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \
QUEUE_PREV(h) = QUEUE_PREV(n); \
QUEUE_PREV_NEXT(h) = (h); \
} \
while (0)
#define QUEUE_SPLIT(h, q, n) \
do { \
QUEUE_PREV(n) = QUEUE_PREV(h); \
QUEUE_PREV_NEXT(n) = (n); \
QUEUE_NEXT(n) = (q); \
QUEUE_PREV(h) = QUEUE_PREV(q); \
QUEUE_PREV_NEXT(h) = (h); \
QUEUE_PREV(q) = (n); \
} \
while (0)
#define QUEUE_INSERT_HEAD(h, q) \
do { \
QUEUE_NEXT(q) = QUEUE_NEXT(h); \
QUEUE_PREV(q) = (h); \
QUEUE_NEXT_PREV(q) = (q); \
QUEUE_NEXT(h) = (q); \
} \
while (0)
#define QUEUE_INSERT_TAIL(h, q) \
do { \
QUEUE_NEXT(q) = (h); \
QUEUE_PREV(q) = QUEUE_PREV(h); \
QUEUE_PREV_NEXT(q) = (q); \
QUEUE_PREV(h) = (q); \
} \
while (0)
#define QUEUE_REMOVE(q) \
do { \
QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \
QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \
} \
while (0)
#endif /* QUEUE_H_ */
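
An illustrative sketch of the intrusive-list idiom these macros implement. The `task` struct and the include name "queue.h" are assumptions; libuv embeds QUEUE fields in its handles and requests in exactly this way:

#include <stdio.h>
#include "queue.h"

struct task {
  int id;
  QUEUE member;  /* Embedded link; no separate node allocation is needed. */
};

int main(void) {
  QUEUE head;
  QUEUE* q;
  struct task a = { 1, { NULL, NULL } };
  struct task b = { 2, { NULL, NULL } };

  QUEUE_INIT(&head);
  QUEUE_INSERT_TAIL(&head, &a.member);
  QUEUE_INSERT_TAIL(&head, &b.member);

  QUEUE_FOREACH(q, &head)
    printf("task %d\n", QUEUE_DATA(q, struct task, member)->id);  /* 1 then 2 */

  QUEUE_REMOVE(&a.member);  /* Unlinks a; the memory itself is untouched. */
  return 0;
}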

View File

@ -0,0 +1,303 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv-common.h"
#if !defined(_WIN32)
# include "unix/internal.h"
#else
# include "win/req-inl.h"
/* TODO(saghul): unify internal req functions */
static void uv__req_init(uv_loop_t* loop,
uv_req_t* req,
uv_req_type type) {
uv_req_init(loop, req);
req->type = type;
uv__req_register(loop, req);
}
# define uv__req_init(loop, req, type) \
uv__req_init((loop), (uv_req_t*)(req), (type))
#endif
#include <stdlib.h>
#define MAX_THREADPOOL_SIZE 128
static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static QUEUE exit_message;
static QUEUE wq;
static volatile int initialized;
static void uv__cancelled(struct uv__work* w) {
abort();
}
/* To avoid deadlock with uv_cancel() it's crucial that the worker
* never holds the global mutex and the loop-local mutex at the same time.
*/
static void worker(void* arg) {
struct uv__work* w;
QUEUE* q;
(void) arg;
for (;;) {
uv_mutex_lock(&mutex);
while (QUEUE_EMPTY(&wq))
uv_cond_wait(&cond, &mutex);
q = QUEUE_HEAD(&wq);
if (q == &exit_message)
uv_cond_signal(&cond);
else {
QUEUE_REMOVE(q);
QUEUE_INIT(q); /* Signal uv_cancel() that the work req is
executing. */
}
uv_mutex_unlock(&mutex);
if (q == &exit_message)
break;
w = QUEUE_DATA(q, struct uv__work, wq);
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
}
}
static void post(QUEUE* q) {
uv_mutex_lock(&mutex);
QUEUE_INSERT_TAIL(&wq, q);
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
}
#ifndef _WIN32
UV_DESTRUCTOR(static void cleanup(void)) {
unsigned int i;
if (initialized == 0)
return;
post(&exit_message);
for (i = 0; i < nthreads; i++)
if (uv_thread_join(threads + i))
abort();
if (threads != default_threads)
free(threads);
uv_mutex_destroy(&mutex);
uv_cond_destroy(&cond);
threads = NULL;
nthreads = 0;
initialized = 0;
}
#endif
static void init_once(void) {
unsigned int i;
const char* val;
nthreads = ARRAY_SIZE(default_threads);
val = getenv("UV_THREADPOOL_SIZE");
if (val != NULL)
nthreads = atoi(val);
if (nthreads == 0)
nthreads = 1;
if (nthreads > MAX_THREADPOOL_SIZE)
nthreads = MAX_THREADPOOL_SIZE;
threads = default_threads;
if (nthreads > ARRAY_SIZE(default_threads)) {
threads = malloc(nthreads * sizeof(threads[0]));
if (threads == NULL) {
nthreads = ARRAY_SIZE(default_threads);
threads = default_threads;
}
}
if (uv_cond_init(&cond))
abort();
if (uv_mutex_init(&mutex))
abort();
QUEUE_INIT(&wq);
for (i = 0; i < nthreads; i++)
if (uv_thread_create(threads + i, worker, NULL))
abort();
initialized = 1;
}
void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
void (*work)(struct uv__work* w),
void (*done)(struct uv__work* w, int status)) {
uv_once(&once, init_once);
w->loop = loop;
w->work = work;
w->done = done;
post(&w->wq);
}
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
int cancelled;
uv_mutex_lock(&mutex);
uv_mutex_lock(&w->loop->wq_mutex);
cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
if (cancelled)
QUEUE_REMOVE(&w->wq);
uv_mutex_unlock(&w->loop->wq_mutex);
uv_mutex_unlock(&mutex);
if (!cancelled)
return UV_EBUSY;
w->work = uv__cancelled;
uv_mutex_lock(&loop->wq_mutex);
QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
uv_async_send(&loop->wq_async);
uv_mutex_unlock(&loop->wq_mutex);
return 0;
}
void uv__work_done(uv_async_t* handle) {
struct uv__work* w;
uv_loop_t* loop;
QUEUE* q;
QUEUE wq;
int err;
loop = container_of(handle, uv_loop_t, wq_async);
QUEUE_INIT(&wq);
uv_mutex_lock(&loop->wq_mutex);
if (!QUEUE_EMPTY(&loop->wq)) {
q = QUEUE_HEAD(&loop->wq);
QUEUE_SPLIT(&loop->wq, q, &wq);
}
uv_mutex_unlock(&loop->wq_mutex);
while (!QUEUE_EMPTY(&wq)) {
q = QUEUE_HEAD(&wq);
QUEUE_REMOVE(q);
w = container_of(q, struct uv__work, wq);
err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
w->done(w, err);
}
}
static void uv__queue_work(struct uv__work* w) {
uv_work_t* req = container_of(w, uv_work_t, work_req);
req->work_cb(req);
}
static void uv__queue_done(struct uv__work* w, int err) {
uv_work_t* req;
req = container_of(w, uv_work_t, work_req);
uv__req_unregister(req->loop, req);
if (req->after_work_cb == NULL)
return;
req->after_work_cb(req, err);
}
int uv_queue_work(uv_loop_t* loop,
uv_work_t* req,
uv_work_cb work_cb,
uv_after_work_cb after_work_cb) {
if (work_cb == NULL)
return UV_EINVAL;
uv__req_init(loop, req, UV_WORK);
req->loop = loop;
req->work_cb = work_cb;
req->after_work_cb = after_work_cb;
uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
return 0;
}
int uv_cancel(uv_req_t* req) {
struct uv__work* wreq;
uv_loop_t* loop;
switch (req->type) {
case UV_FS:
loop = ((uv_fs_t*) req)->loop;
wreq = &((uv_fs_t*) req)->work_req;
break;
case UV_GETADDRINFO:
loop = ((uv_getaddrinfo_t*) req)->loop;
wreq = &((uv_getaddrinfo_t*) req)->work_req;
break;
case UV_GETNAMEINFO:
loop = ((uv_getnameinfo_t*) req)->loop;
wreq = &((uv_getnameinfo_t*) req)->work_req;
break;
case UV_WORK:
loop = ((uv_work_t*) req)->loop;
wreq = &((uv_work_t*) req)->work_req;
break;
default:
return UV_EINVAL;
}
return uv__work_cancel(loop, req, wreq);
}
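
A hedged sketch of the public entry point above, uv_queue_work(); the "expensive computation" stand-in and the use of req.data are illustrative assumptions:

#include <stdio.h>
#include "uv.h"

/* Runs on a threadpool thread; must not touch the loop or its handles. */
static void work_cb(uv_work_t* req) {
  long* n = req->data;
  *n = *n * 2;  /* Stand-in for a blocking computation. */
}

/* Runs back on the loop thread once work_cb finishes (or is cancelled). */
static void after_work_cb(uv_work_t* req, int status) {
  if (status == UV_ECANCELED)
    printf("cancelled\n");
  else
    printf("result: %ld\n", *(long*) req->data);
}

int main(void) {
  static long value = 21;
  uv_work_t req;

  req.data = &value;
  uv_queue_work(uv_default_loop(), &req, work_cb, after_work_cb);
  /* uv_cancel((uv_req_t*) &req) would only succeed while the request is
   * still queued, i.e. before a worker thread picks it up.
   */
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}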

File diff suppressed because it is too large

View File

@ -0,0 +1,702 @@
/*
Copyright (c) 2013, Kenneth MacKay
Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement n° 289016)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "android-ifaddrs.h"
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
typedef struct NetlinkList
{
struct NetlinkList *m_next;
struct nlmsghdr *m_data;
unsigned int m_size;
} NetlinkList;
static int netlink_socket(void)
{
struct sockaddr_nl l_addr;
int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if(l_socket < 0)
{
return -1;
}
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
{
close(l_socket);
return -1;
}
return l_socket;
}
static int netlink_send(int p_socket, int p_request)
{
char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
struct nlmsghdr *l_hdr;
struct rtgenmsg *l_msg;
struct sockaddr_nl l_addr;
memset(l_buffer, 0, sizeof(l_buffer));
l_hdr = (struct nlmsghdr *)l_buffer;
l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
l_hdr->nlmsg_type = p_request;
l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
l_hdr->nlmsg_pid = 0;
l_hdr->nlmsg_seq = p_socket;
l_msg->rtgen_family = AF_UNSPEC;
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
}
static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
{
struct sockaddr_nl l_addr;
struct msghdr l_msg;
struct iovec l_iov;
l_iov.iov_base = p_buffer;
l_iov.iov_len = p_len;
for(;;)
{
int l_result;
l_msg.msg_name = (void *)&l_addr;
l_msg.msg_namelen = sizeof(l_addr);
l_msg.msg_iov = &l_iov;
l_msg.msg_iovlen = 1;
l_msg.msg_control = NULL;
l_msg.msg_controllen = 0;
l_msg.msg_flags = 0;
l_result = recvmsg(p_socket, &l_msg, 0);
if(l_result < 0)
{
if(errno == EINTR)
{
continue;
}
return -2;
}
/* Buffer was too small */
if(l_msg.msg_flags & MSG_TRUNC)
{
return -1;
}
return l_result;
}
}
static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_done)
{
size_t l_size = 4096;
void *l_buffer = NULL;
for(;;)
{
int l_read;
free(l_buffer);
l_buffer = malloc(l_size);
if (l_buffer == NULL)
{
return NULL;
}
l_read = netlink_recv(p_socket, l_buffer, l_size);
*p_size = l_read;
if(l_read == -2)
{
free(l_buffer);
return NULL;
}
if(l_read >= 0)
{
pid_t l_pid = getpid();
struct nlmsghdr *l_hdr;
for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
{
if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
*p_done = 1;
break;
}
if(l_hdr->nlmsg_type == NLMSG_ERROR)
{
free(l_buffer);
return NULL;
}
}
return l_buffer;
}
l_size *= 2;
}
}
static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
{
NetlinkList *l_item = malloc(sizeof(NetlinkList));
if (l_item == NULL)
{
return NULL;
}
l_item->m_next = NULL;
l_item->m_data = p_data;
l_item->m_size = p_size;
return l_item;
}
static void freeResultList(NetlinkList *p_list)
{
NetlinkList *l_cur;
while(p_list)
{
l_cur = p_list;
p_list = p_list->m_next;
free(l_cur->m_data);
free(l_cur);
}
}
static NetlinkList *getResultList(int p_socket, int p_request)
{
int l_size;
int l_done;
NetlinkList *l_list;
NetlinkList *l_end;
if(netlink_send(p_socket, p_request) < 0)
{
return NULL;
}
l_list = NULL;
l_end = NULL;
l_done = 0;
while(!l_done)
{
NetlinkList *l_item;
struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, &l_size, &l_done);
/* Error */
if(!l_hdr)
{
freeResultList(l_list);
return NULL;
}
l_item = newListItem(l_hdr, l_size);
if (!l_item)
{
freeResultList(l_list);
return NULL;
}
if(!l_list)
{
l_list = l_item;
}
else
{
l_end->m_next = l_item;
}
l_end = l_item;
}
return l_list;
}
static size_t maxSize(size_t a, size_t b)
{
return (a > b ? a : b);
}
static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
{
switch(p_family)
{
case AF_INET:
return sizeof(struct sockaddr_in);
case AF_INET6:
return sizeof(struct sockaddr_in6);
case AF_PACKET:
return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
default:
return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
}
}
static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
{
switch(p_family)
{
case AF_INET:
memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
break;
case AF_INET6:
memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
break;
case AF_PACKET:
memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
break;
default:
memcpy(p_dest->sa_data, p_data, p_size);
break;
}
p_dest->sa_family = p_family;
}
static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
{
if(!*p_resultList)
{
*p_resultList = p_entry;
}
else
{
struct ifaddrs *l_cur = *p_resultList;
while(l_cur->ifa_next)
{
l_cur = l_cur->ifa_next;
}
l_cur->ifa_next = p_entry;
}
}
static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList)
{
struct ifaddrs *l_entry;
char *l_index;
char *l_name;
char *l_addr;
char *l_data;
struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
size_t l_dataSize = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
struct rtattr *l_rta;
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
break;
case IFLA_IFNAME:
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
break;
case IFLA_STATS:
l_dataSize += NLMSG_ALIGN(l_rtaSize);
break;
default:
break;
}
}
l_entry = malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = "";
l_index = ((char *)l_entry) + sizeof(struct ifaddrs);
l_name = l_index + sizeof(int);
l_addr = l_name + l_nameSize;
l_data = l_addr + l_addrSize;
/* Save the interface index so we can look it up when handling the
* addresses.
*/
memcpy(l_index, &l_info->ifi_index, sizeof(int));
l_entry->ifa_flags = l_info->ifi_flags;
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
{
size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
if(l_rta->rta_type == IFLA_ADDRESS)
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFLA_IFNAME:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
case IFLA_STATS:
memcpy(l_data, l_rtaData, l_rtaDataSize);
l_entry->ifa_data = l_data;
break;
default:
break;
}
}
addToEnd(p_resultList, l_entry);
return 0;
}
static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks)
{
int l_num = 0;
struct ifaddrs *l_cur = *p_links;
while(l_cur && l_num < p_numLinks)
{
char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs);
int l_index;
memcpy(&l_index, l_indexPtr, sizeof(int));
if(l_index == p_index)
{
return l_cur;
}
l_cur = l_cur->ifa_next;
++l_num;
}
return NULL;
}
static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks)
{
struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
int l_addedNetmask = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
struct rtattr *l_rta;
struct ifaddrs *l_entry;
char *l_name;
char *l_addr;
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
if(l_info->ifa_family == AF_PACKET)
{
continue;
}
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_LOCAL:
if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
{
/* Make room for netmask */
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
l_addedNetmask = 1;
}
case IFA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
break;
case IFA_LABEL:
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
break;
default:
break;
}
}
l_entry = malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = (l_interface ? l_interface->ifa_name : "");
l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
l_addr = l_name + l_nameSize;
l_entry->ifa_flags = l_info->ifa_flags;
if(l_interface)
{
l_entry->ifa_flags |= l_interface->ifa_flags;
}
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_BROADCAST:
case IFA_LOCAL:
{
size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
if(l_info->ifa_family == AF_INET6)
{
if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
{
((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
}
}
/* Apparently in a point-to-point network IFA_ADDRESS contains
* the dest address and IFA_LOCAL contains the local address
*/
if(l_rta->rta_type == IFA_ADDRESS)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
}
else if(l_rta->rta_type == IFA_LOCAL)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = l_entry->ifa_addr;
}
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFA_LABEL:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
default:
break;
}
}
if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
{
unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
char l_mask[16] = {0};
unsigned i;
for(i=0; i<(l_prefix/8); ++i)
{
l_mask[i] = 0xff;
}
if(l_prefix % 8)
{
l_mask[i] = 0xff << (8 - (l_prefix % 8));
}
makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
l_entry->ifa_netmask = (struct sockaddr *)l_addr;
}
addToEnd(p_resultList, l_entry);
return 0;
}
static int interpretLinks(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
{
int l_numLinks = 0;
pid_t l_pid = getpid();
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWLINK)
{
if(interpretLink(l_hdr, p_resultList) == -1)
{
return -1;
}
++l_numLinks;
}
}
}
return l_numLinks;
}
static int interpretAddrs(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
{
pid_t l_pid = getpid();
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWADDR)
{
if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1)
{
return -1;
}
}
}
}
return 0;
}
int getifaddrs(struct ifaddrs **ifap)
{
int l_socket;
int l_result;
int l_numLinks;
NetlinkList *l_linkResults;
NetlinkList *l_addrResults;
if(!ifap)
{
return -1;
}
*ifap = NULL;
l_socket = netlink_socket();
if(l_socket < 0)
{
return -1;
}
l_linkResults = getResultList(l_socket, RTM_GETLINK);
if(!l_linkResults)
{
close(l_socket);
return -1;
}
l_addrResults = getResultList(l_socket, RTM_GETADDR);
if(!l_addrResults)
{
close(l_socket);
freeResultList(l_linkResults);
return -1;
}
l_result = 0;
l_numLinks = interpretLinks(l_socket, l_linkResults, ifap);
if(l_numLinks == -1 || interpretAddrs(l_socket, l_addrResults, ifap, l_numLinks) == -1)
{
l_result = -1;
}
freeResultList(l_linkResults);
freeResultList(l_addrResults);
close(l_socket);
return l_result;
}
void freeifaddrs(struct ifaddrs *ifa)
{
struct ifaddrs *l_cur;
while(ifa)
{
l_cur = ifa;
ifa = ifa->ifa_next;
free(l_cur);
}
}
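
The Android replacement above keeps the standard getifaddrs() contract, so callers look the same as on other platforms. A minimal, illustrative walk of the list:

#include <stdio.h>
#include "android-ifaddrs.h"

int main(void) {
  struct ifaddrs *list, *cur;

  if (getifaddrs(&list) != 0)
    return 1;

  for (cur = list; cur != NULL; cur = cur->ifa_next)
    printf("%s family=%d flags=0x%x\n",
           cur->ifa_name,
           cur->ifa_addr != NULL ? cur->ifa_addr->sa_family : -1,
           cur->ifa_flags);

  freeifaddrs(list);
  return 0;
}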

View File

@ -0,0 +1,312 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* This file contains both the uv__async internal infrastructure and the
* user-facing uv_async_t functions.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stdio.h> /* snprintf() */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static void uv__async_event(uv_loop_t* loop,
struct uv__async* w,
unsigned int nevents);
static int uv__async_make_pending(int* pending);
static int uv__async_eventfd(void);
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
int err;
err = uv__async_start(loop, &loop->async_watcher, uv__async_event);
if (err)
return err;
uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
handle->async_cb = async_cb;
handle->pending = 0;
QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
uv__handle_start(handle);
return 0;
}
int uv_async_send(uv_async_t* handle) {
if (uv__async_make_pending(&handle->pending) == 0)
uv__async_send(&handle->loop->async_watcher);
return 0;
}
void uv__async_close(uv_async_t* handle) {
QUEUE_REMOVE(&handle->queue);
uv__handle_stop(handle);
}
static void uv__async_event(uv_loop_t* loop,
struct uv__async* w,
unsigned int nevents) {
QUEUE* q;
uv_async_t* h;
QUEUE_FOREACH(q, &loop->async_handles) {
h = QUEUE_DATA(q, uv_async_t, queue);
if (h->pending == 0)
continue;
h->pending = 0;
if (h->async_cb == NULL)
continue;
h->async_cb(h);
}
}
static int uv__async_make_pending(int* pending) {
/* Do a cheap read first. */
if (ACCESS_ONCE(int, *pending) != 0)
return 1;
/* Micro-optimization: use atomic memory operations to detect if we've been
* preempted by another thread and don't have to make an expensive syscall.
* This speeds up the heavily contended case by about 1-2% and has little
* if any impact on the non-contended case.
*
* Use XCHG instead of the CMPXCHG that __sync_val_compare_and_swap() emits
* on x86, it's about 4x faster. It probably makes zero difference in the
* grand scheme of things but I'm OCD enough not to let this one pass.
*/
#if defined(__i386__) || defined(__x86_64__)
{
unsigned int val = 1;
__asm__ __volatile__ ("xchgl %0, %1"
: "+r" (val)
: "m" (*pending));
return val != 0;
}
#elif defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ > 0)
return __sync_val_compare_and_swap(pending, 0, 1) != 0;
#else
ACCESS_ONCE(int, *pending) = 1;
return 0;
#endif
}
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
struct uv__async* wa;
char buf[1024];
unsigned n;
ssize_t r;
n = 0;
for (;;) {
r = read(w->fd, buf, sizeof(buf));
if (r > 0)
n += r;
if (r == sizeof(buf))
continue;
if (r != -1)
break;
if (errno == EAGAIN || errno == EWOULDBLOCK)
break;
if (errno == EINTR)
continue;
abort();
}
wa = container_of(w, struct uv__async, io_watcher);
#if defined(__linux__)
if (wa->wfd == -1) {
uint64_t val;
assert(n == sizeof(val));
memcpy(&val, buf, sizeof(val)); /* Avoid alignment issues. */
wa->cb(loop, wa, val);
return;
}
#endif
wa->cb(loop, wa, n);
}
void uv__async_send(struct uv__async* wa) {
const void* buf;
ssize_t len;
int fd;
int r;
buf = "";
len = 1;
fd = wa->wfd;
#if defined(__linux__)
if (fd == -1) {
static const uint64_t val = 1;
buf = &val;
len = sizeof(val);
fd = wa->io_watcher.fd; /* eventfd */
}
#endif
do
r = write(fd, buf, len);
while (r == -1 && errno == EINTR);
if (r == len)
return;
if (r == -1)
if (errno == EAGAIN || errno == EWOULDBLOCK)
return;
abort();
}
void uv__async_init(struct uv__async* wa) {
wa->io_watcher.fd = -1;
wa->wfd = -1;
}
int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb) {
int pipefd[2];
int err;
if (wa->io_watcher.fd != -1)
return 0;
err = uv__async_eventfd();
if (err >= 0) {
pipefd[0] = err;
pipefd[1] = -1;
}
else if (err == -ENOSYS) {
err = uv__make_pipe(pipefd, UV__F_NONBLOCK);
#if defined(__linux__)
/* Save a file descriptor by opening one of the pipe descriptors as
* read/write through the procfs. That file descriptor can then
* function as both ends of the pipe.
*/
if (err == 0) {
char buf[32];
int fd;
snprintf(buf, sizeof(buf), "/proc/self/fd/%d", pipefd[0]);
fd = uv__open_cloexec(buf, O_RDWR);
if (fd >= 0) {
uv__close(pipefd[0]);
uv__close(pipefd[1]);
pipefd[0] = fd;
pipefd[1] = fd;
}
}
#endif
}
if (err < 0)
return err;
uv__io_init(&wa->io_watcher, uv__async_io, pipefd[0]);
uv__io_start(loop, &wa->io_watcher, UV__POLLIN);
wa->wfd = pipefd[1];
wa->cb = cb;
return 0;
}
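/* Illustrative sketch (not part of libuv): the /proc/self/fd trick used in
 * uv__async_start() above, in isolation. Linux-only; the helper name is
 * hypothetical and error handling is trimmed to a minimum.
 */
#include <stdio.h>   /* snprintf */
#include <fcntl.h>   /* open, O_RDWR */
#include <unistd.h>  /* pipe, read, write, close */

static int example_single_fd_pipe(void) {
  char path[32];
  char c;
  int fds[2];
  int fd;
  if (pipe(fds))
    return -1;
  /* Re-open the read end read/write through procfs; the resulting
   * descriptor can act as both the read and the write end. */
  snprintf(path, sizeof(path), "/proc/self/fd/%d", fds[0]);
  fd = open(path, O_RDWR);
  close(fds[0]);
  close(fds[1]);
  if (fd == -1)
    return -1;
  if (write(fd, "x", 1) == 1 && read(fd, &c, 1) == 1)
    return fd;  /* One descriptor serves both ends. */
  close(fd);
  return -1;
}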
void uv__async_stop(uv_loop_t* loop, struct uv__async* wa) {
if (wa->io_watcher.fd == -1)
return;
if (wa->wfd != -1) {
if (wa->wfd != wa->io_watcher.fd)
uv__close(wa->wfd);
wa->wfd = -1;
}
uv__io_stop(loop, &wa->io_watcher, UV__POLLIN);
uv__close(wa->io_watcher.fd);
wa->io_watcher.fd = -1;
}
static int uv__async_eventfd() {
#if defined(__linux__)
static int no_eventfd2;
static int no_eventfd;
int fd;
if (no_eventfd2)
goto skip_eventfd2;
fd = uv__eventfd2(0, UV__EFD_CLOEXEC | UV__EFD_NONBLOCK);
if (fd != -1)
return fd;
if (errno != ENOSYS)
return -errno;
no_eventfd2 = 1;
skip_eventfd2:
if (no_eventfd)
goto skip_eventfd;
fd = uv__eventfd(0);
if (fd != -1) {
uv__cloexec(fd, 1);
uv__nonblock(fd, 1);
return fd;
}
if (errno != ENOSYS)
return -errno;
no_eventfd = 1;
skip_eventfd:
#endif
return -ENOSYS;
}
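/* Illustrative sketch (not part of libuv): minimal user-side use of the
 * uv_async_t API implemented above -- waking the loop from another thread.
 * Assumes this tree's public uv.h and its uv_thread_* helpers; the callback
 * signature follows the one-argument form used by h->async_cb(h) above, and
 * the compile line (roughly: cc example.c -luv -lpthread) is hypothetical.
 */
#include <stdio.h>
#include "uv.h"

static uv_async_t wakeup;

static void on_wakeup(uv_async_t* handle) {
  printf("woken from another thread\n");
  uv_close((uv_handle_t*) handle, NULL);  /* Let the loop exit. */
}

static void worker(void* arg) {
  (void) arg;
  /* uv_async_send() is safe to call from any thread; multiple calls may be
   * coalesced into a single callback invocation. */
  uv_async_send(&wakeup);
}

int example_async_main(void) {
  uv_thread_t thread;
  uv_loop_t* loop = uv_default_loop();
  uv_async_init(loop, &wakeup, on_wakeup);
  uv_thread_create(&thread, worker, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  uv_thread_join(&thread);
  return 0;
}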

View File

@ -0,0 +1,60 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_ATOMIC_OPS_H_
#define UV_ATOMIC_OPS_H_
#include "internal.h" /* UV_UNUSED */
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval));
UV_UNUSED(static void cpu_relax(void));
/* Prefer hand-rolled assembly over the gcc builtins because the latter also
* issue full memory barriers.
*/
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
#if defined(__i386__) || defined(__x86_64__)
int out;
__asm__ __volatile__ ("lock; cmpxchg %2, %1;"
: "=a" (out), "+m" (*(volatile int*) ptr)
: "r" (newval), "0" (oldval)
: "memory");
return out;
#else
return __sync_val_compare_and_swap(ptr, oldval, newval);
#endif
}
UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval)) {
#if defined(__i386__) || defined(__x86_64__)
long out;
__asm__ __volatile__ ("lock; cmpxchg %2, %1;"
: "=a" (out), "+m" (*(volatile long*) ptr)
: "r" (newval), "0" (oldval)
: "memory");
return out;
#else
return __sync_val_compare_and_swap(ptr, oldval, newval);
#endif
}
UV_UNUSED(static void cpu_relax(void)) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("rep; nop"); /* a.k.a. PAUSE */
#endif
}
#endif /* UV_ATOMIC_OPS_H_ */
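/* Illustrative sketch (not part of libuv): how cmpxchgi() and cpu_relax()
 * above combine into a minimal spinlock. Hypothetical helpers, shown only to
 * make the intended use of the primitives concrete; x86 sketch, not a
 * production lock.
 */
static void example_spin_lock(int* lock) {
  /* Retry the compare-and-swap until we observe 0 and win the exchange;
   * cpu_relax() (PAUSE on x86) keeps the spinning core polite. */
  while (cmpxchgi(lock, 0, 1) != 0)
    cpu_relax();
}

static void example_spin_unlock(int* lock) {
  /* Release by swapping 1 back to 0 with the same primitive. */
  cmpxchgi(lock, 1, 0);
}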

View File

@ -0,0 +1,948 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#ifdef __linux__
# include <sys/ioctl.h>
#endif
#ifdef __sun
# include <sys/types.h>
# include <sys/wait.h>
#endif
#ifdef __APPLE__
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# include <sys/ioctl.h>
#endif
#ifdef __FreeBSD__
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/ioctl.h>
# include <sys/wait.h>
# define UV__O_CLOEXEC O_CLOEXEC
# if __FreeBSD__ >= 10
# define uv__accept4 accept4
# define UV__SOCK_NONBLOCK SOCK_NONBLOCK
# define UV__SOCK_CLOEXEC SOCK_CLOEXEC
# endif
# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
# define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC
# endif
#endif
#ifdef _AIX
#include <sys/ioctl.h>
#endif
static void uv__run_pending(uv_loop_t* loop);
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
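/* Illustrative sketch (not part of libuv): what the ABI checks above buy us --
 * an array of uv_buf_t can be handed to writev() with a cast instead of a
 * copy. The helper name is hypothetical.
 */
#include <sys/types.h>
#include <sys/uio.h>  /* writev */

static ssize_t example_write_bufs(int fd, const uv_buf_t* bufs, int nbufs) {
  /* Safe only because the STATIC_ASSERTs above pin uv_buf_t to the exact
   * size and field offsets of struct iovec. */
  return writev(fd, (const struct iovec*) bufs, nbufs);
}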
uint64_t uv_hrtime(void) {
return uv__hrtime(UV_CLOCK_PRECISE);
}
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
handle->flags |= UV_CLOSING;
handle->close_cb = close_cb;
switch (handle->type) {
case UV_NAMED_PIPE:
uv__pipe_close((uv_pipe_t*)handle);
break;
case UV_TTY:
uv__stream_close((uv_stream_t*)handle);
break;
case UV_TCP:
uv__tcp_close((uv_tcp_t*)handle);
break;
case UV_UDP:
uv__udp_close((uv_udp_t*)handle);
break;
case UV_PREPARE:
uv__prepare_close((uv_prepare_t*)handle);
break;
case UV_CHECK:
uv__check_close((uv_check_t*)handle);
break;
case UV_IDLE:
uv__idle_close((uv_idle_t*)handle);
break;
case UV_ASYNC:
uv__async_close((uv_async_t*)handle);
break;
case UV_TIMER:
uv__timer_close((uv_timer_t*)handle);
break;
case UV_PROCESS:
uv__process_close((uv_process_t*)handle);
break;
case UV_FS_EVENT:
uv__fs_event_close((uv_fs_event_t*)handle);
break;
case UV_POLL:
uv__poll_close((uv_poll_t*)handle);
break;
case UV_FS_POLL:
uv__fs_poll_close((uv_fs_poll_t*)handle);
break;
case UV_SIGNAL:
uv__signal_close((uv_signal_t*) handle);
      /* Signal handles may not be closed immediately. The signal code will
       * itself call uv__make_close_pending() whenever appropriate. */
return;
default:
assert(0);
}
uv__make_close_pending(handle);
}
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
int r;
int fd;
socklen_t len;
if (handle == NULL || value == NULL)
return -EINVAL;
if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
fd = uv__stream_fd((uv_stream_t*) handle);
else if (handle->type == UV_UDP)
fd = ((uv_udp_t *) handle)->io_watcher.fd;
else
return -ENOTSUP;
len = sizeof(*value);
if (*value == 0)
r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
else
r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);
if (r < 0)
return -errno;
return 0;
}
void uv__make_close_pending(uv_handle_t* handle) {
assert(handle->flags & UV_CLOSING);
assert(!(handle->flags & UV_CLOSED));
handle->next_closing = handle->loop->closing_handles;
handle->loop->closing_handles = handle;
}
static void uv__finish_close(uv_handle_t* handle) {
/* Note: while the handle is in the UV_CLOSING state now, it's still possible
* for it to be active in the sense that uv__is_active() returns true.
* A good example is when the user calls uv_shutdown(), immediately followed
* by uv_close(). The handle is considered active at this point because the
* completion of the shutdown req is still pending.
*/
assert(handle->flags & UV_CLOSING);
assert(!(handle->flags & UV_CLOSED));
handle->flags |= UV_CLOSED;
switch (handle->type) {
case UV_PREPARE:
case UV_CHECK:
case UV_IDLE:
case UV_ASYNC:
case UV_TIMER:
case UV_PROCESS:
case UV_FS_EVENT:
case UV_FS_POLL:
case UV_POLL:
case UV_SIGNAL:
break;
case UV_NAMED_PIPE:
case UV_TCP:
case UV_TTY:
uv__stream_destroy((uv_stream_t*)handle);
break;
case UV_UDP:
uv__udp_finish_close((uv_udp_t*)handle);
break;
default:
assert(0);
break;
}
uv__handle_unref(handle);
QUEUE_REMOVE(&handle->handle_queue);
if (handle->close_cb) {
handle->close_cb(handle);
}
}
static void uv__run_closing_handles(uv_loop_t* loop) {
uv_handle_t* p;
uv_handle_t* q;
p = loop->closing_handles;
loop->closing_handles = NULL;
while (p) {
q = p->next_closing;
uv__finish_close(p);
p = q;
}
}
int uv_is_closing(const uv_handle_t* handle) {
return uv__is_closing(handle);
}
int uv_backend_fd(const uv_loop_t* loop) {
return loop->backend_fd;
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (!QUEUE_EMPTY(&loop->idle_handles))
return 0;
if (loop->closing_handles)
return 0;
return uv__next_timeout(loop);
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
loop->closing_handles != NULL;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
int timeout;
int r;
r = uv__loop_alive(loop);
if (!r)
uv__update_time(loop);
while (r != 0 && loop->stop_flag == 0) {
UV_TICK_START(loop, mode);
uv__update_time(loop);
uv__run_timers(loop);
uv__run_pending(loop);
uv__run_idle(loop);
uv__run_prepare(loop);
timeout = 0;
if ((mode & UV_RUN_NOWAIT) == 0)
timeout = uv_backend_timeout(loop);
uv__io_poll(loop, timeout);
uv__run_check(loop);
uv__run_closing_handles(loop);
if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
* been invoked when it returns. uv__io_poll() can return without doing
* I/O (meaning: no callbacks) when its timeout expires - which means we
* have pending timers that satisfy the forward progress constraint.
*
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv__update_time(loop);
uv__run_timers(loop);
}
r = uv__loop_alive(loop);
UV_TICK_STOP(loop, mode);
if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
break;
}
/* The if statement lets gcc compile it to a conditional store. Avoids
* dirtying a cache line.
*/
if (loop->stop_flag != 0)
loop->stop_flag = 0;
return r;
}
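/* Illustrative sketch (not part of libuv): how the three run modes handled
 * above are typically driven by an embedder. Hypothetical helper names.
 */
#include "uv.h"

static void example_poll_nowait(uv_loop_t* loop) {
  /* UV_RUN_NOWAIT: poll for I/O without blocking; returns immediately
   * whether or not any callback fired. */
  uv_run(loop, UV_RUN_NOWAIT);
}

static void example_block_until_progress(uv_loop_t* loop) {
  /* UV_RUN_ONCE: block for I/O and guarantee forward progress (at least one
   * callback or a timer run) before returning, per the note above. */
  uv_run(loop, UV_RUN_ONCE);
}

static void example_run_to_completion(uv_loop_t* loop) {
  /* UV_RUN_DEFAULT: loop until there are no active handles or requests. */
  uv_run(loop, UV_RUN_DEFAULT);
}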
void uv_update_time(uv_loop_t* loop) {
uv__update_time(loop);
}
int uv_is_active(const uv_handle_t* handle) {
return uv__is_active(handle);
}
/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
int sockfd;
int err;
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
if (sockfd != -1)
return sockfd;
if (errno != EINVAL)
return -errno;
#endif
sockfd = socket(domain, type, protocol);
if (sockfd == -1)
return -errno;
err = uv__nonblock(sockfd, 1);
if (err == 0)
err = uv__cloexec(sockfd, 1);
if (err) {
uv__close(sockfd);
return err;
}
#if defined(SO_NOSIGPIPE)
{
int on = 1;
setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
}
#endif
return sockfd;
}
int uv__accept(int sockfd) {
int peerfd;
int err;
assert(sockfd >= 0);
while (1) {
#if defined(__linux__) || __FreeBSD__ >= 10
static int no_accept4;
if (no_accept4)
goto skip;
peerfd = uv__accept4(sockfd,
NULL,
NULL,
UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
if (peerfd != -1)
return peerfd;
if (errno == EINTR)
continue;
if (errno != ENOSYS)
return -errno;
no_accept4 = 1;
skip:
#endif
peerfd = accept(sockfd, NULL, NULL);
if (peerfd == -1) {
if (errno == EINTR)
continue;
return -errno;
}
err = uv__cloexec(peerfd, 1);
if (err == 0)
err = uv__nonblock(peerfd, 1);
if (err) {
uv__close(peerfd);
return err;
}
return peerfd;
}
}
int uv__close(int fd) {
int saved_errno;
int rc;
assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */
assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
saved_errno = errno;
rc = close(fd);
if (rc == -1) {
rc = -errno;
if (rc == -EINTR)
rc = -EINPROGRESS; /* For platform/libc consistency. */
errno = saved_errno;
}
return rc;
}
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
defined(_AIX)
int uv__nonblock(int fd, int set) {
int r;
do
r = ioctl(fd, FIONBIO, &set);
while (r == -1 && errno == EINTR);
if (r)
return -errno;
return 0;
}
int uv__cloexec(int fd, int set) {
int r;
do
r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
while (r == -1 && errno == EINTR);
if (r)
return -errno;
return 0;
}
#else /* !(defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) */
int uv__nonblock(int fd, int set) {
int flags;
int r;
do
r = fcntl(fd, F_GETFL);
while (r == -1 && errno == EINTR);
if (r == -1)
return -errno;
/* Bail out now if already set/clear. */
if (!!(r & O_NONBLOCK) == !!set)
return 0;
if (set)
flags = r | O_NONBLOCK;
else
flags = r & ~O_NONBLOCK;
do
r = fcntl(fd, F_SETFL, flags);
while (r == -1 && errno == EINTR);
if (r)
return -errno;
return 0;
}
int uv__cloexec(int fd, int set) {
int flags;
int r;
do
r = fcntl(fd, F_GETFD);
while (r == -1 && errno == EINTR);
if (r == -1)
return -errno;
/* Bail out now if already set/clear. */
if (!!(r & FD_CLOEXEC) == !!set)
return 0;
if (set)
flags = r | FD_CLOEXEC;
else
flags = r & ~FD_CLOEXEC;
do
r = fcntl(fd, F_SETFD, flags);
while (r == -1 && errno == EINTR);
if (r)
return -errno;
return 0;
}
#endif /* defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) */
/* This function is not execve-safe; there is a race window
* between the call to dup() and fcntl(FD_CLOEXEC).
*/
int uv__dup(int fd) {
int err;
fd = dup(fd);
if (fd == -1)
return -errno;
err = uv__cloexec(fd, 1);
if (err) {
uv__close(fd);
return err;
}
return fd;
}
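/* Illustrative sketch (not part of libuv): closing the race window noted
 * above by duplicating and setting CLOEXEC in a single call where
 * F_DUPFD_CLOEXEC (POSIX.1-2008) is available. Hypothetical helper.
 */
#include <fcntl.h>  /* fcntl, F_DUPFD_CLOEXEC */
#include <errno.h>

static int example_dup_cloexec(int fd) {
#if defined(F_DUPFD_CLOEXEC)
  /* The duplicate is created with FD_CLOEXEC already set, so a concurrent
   * fork()+execve() can never inherit it. */
  int newfd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
  if (newfd == -1)
    return -errno;
  return newfd;
#else
  (void) fd;
  return -ENOSYS;  /* Fall back to the dup()+uv__cloexec() path above. */
#endif
}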
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
struct cmsghdr* cmsg;
ssize_t rc;
int* pfd;
int* end;
#if defined(__linux__)
static int no_msg_cmsg_cloexec;
if (no_msg_cmsg_cloexec == 0) {
rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */
if (rc != -1)
return rc;
if (errno != EINVAL)
return -errno;
rc = recvmsg(fd, msg, flags);
if (rc == -1)
return -errno;
no_msg_cmsg_cloexec = 1;
} else {
rc = recvmsg(fd, msg, flags);
}
#else
rc = recvmsg(fd, msg, flags);
#endif
if (rc == -1)
return -errno;
if (msg->msg_controllen == 0)
return rc;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
if (cmsg->cmsg_type == SCM_RIGHTS)
for (pfd = (int*) CMSG_DATA(cmsg),
end = (int*) ((char*) cmsg + cmsg->cmsg_len);
pfd < end;
pfd += 1)
uv__cloexec(*pfd, 1);
return rc;
}
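/* Illustrative sketch (not part of libuv): the sending side of the
 * SCM_RIGHTS descriptor passing that uv__recvmsg() above receives and marks
 * close-on-exec. Hypothetical helper; error handling reduced to a single
 * return code.
 */
#include <string.h>      /* memset, memcpy */
#include <sys/uio.h>     /* struct iovec */
#include <sys/socket.h>  /* sendmsg, CMSG_* */

static int example_send_fd(int sock, int fd_to_send) {
  char byte = 0;
  struct iovec iov;
  struct msghdr msg;
  struct cmsghdr* cmsg;
  char control[CMSG_SPACE(sizeof(int))];

  /* At least one byte of regular data must accompany the control message. */
  iov.iov_base = &byte;
  iov.iov_len = 1;

  memset(&msg, 0, sizeof(msg));
  memset(control, 0, sizeof(control));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);

  /* The descriptor travels in a SCM_RIGHTS ancillary message. */
  cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

  return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}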
int uv_cwd(char* buffer, size_t* size) {
if (buffer == NULL || size == NULL)
return -EINVAL;
if (getcwd(buffer, *size) == NULL)
return -errno;
*size = strlen(buffer) + 1;
return 0;
}
int uv_chdir(const char* dir) {
if (chdir(dir))
return -errno;
return 0;
}
void uv_disable_stdio_inheritance(void) {
int fd;
/* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
* first 16 file descriptors. After that, bail out after the first error.
*/
for (fd = 0; ; fd++)
if (uv__cloexec(fd, 1) && fd > 15)
break;
}
static void uv__run_pending(uv_loop_t* loop) {
QUEUE* q;
uv__io_t* w;
while (!QUEUE_EMPTY(&loop->pending_queue)) {
q = QUEUE_HEAD(&loop->pending_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, pending_queue);
w->cb(loop, w, UV__POLLOUT);
}
}
static unsigned int next_power_of_two(unsigned int val) {
val -= 1;
val |= val >> 1;
val |= val >> 2;
val |= val >> 4;
val |= val >> 8;
val |= val >> 16;
val += 1;
return val;
}
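/* Worked example (not part of libuv): why the shift cascade above rounds up
 * to a power of two. For val = 37 (100101b), subtracting one gives 100100b;
 * each OR-shift smears the top set bit downward until every lower bit is set
 * (111111b = 63); adding one yields 1000000b = 64, the next power of two.
 * Exact powers of two map to themselves because of the initial "val -= 1".
 */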
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
uv__io_t** watchers;
void* fake_watcher_list;
void* fake_watcher_count;
unsigned int nwatchers;
unsigned int i;
if (len <= loop->nwatchers)
return;
/* Preserve fake watcher list and count at the end of the watchers */
if (loop->watchers != NULL) {
fake_watcher_list = loop->watchers[loop->nwatchers];
fake_watcher_count = loop->watchers[loop->nwatchers + 1];
} else {
fake_watcher_list = NULL;
fake_watcher_count = NULL;
}
nwatchers = next_power_of_two(len + 2) - 2;
watchers = realloc(loop->watchers,
(nwatchers + 2) * sizeof(loop->watchers[0]));
if (watchers == NULL)
abort();
for (i = loop->nwatchers; i < nwatchers; i++)
watchers[i] = NULL;
watchers[nwatchers] = fake_watcher_list;
watchers[nwatchers + 1] = fake_watcher_count;
loop->watchers = watchers;
loop->nwatchers = nwatchers;
}
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
assert(cb != NULL);
assert(fd >= -1);
QUEUE_INIT(&w->pending_queue);
QUEUE_INIT(&w->watcher_queue);
w->cb = cb;
w->fd = fd;
w->events = 0;
w->pevents = 0;
#if defined(UV_HAVE_KQUEUE)
w->rcount = 0;
w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
assert(0 != events);
assert(w->fd >= 0);
assert(w->fd < INT_MAX);
w->pevents |= events;
maybe_resize(loop, w->fd + 1);
#if !defined(__sun)
/* The event ports backend needs to rearm all file descriptors on each and
* every tick of the event loop but the other backends allow us to
* short-circuit here if the event mask is unchanged.
*/
if (w->events == w->pevents) {
if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) {
QUEUE_REMOVE(&w->watcher_queue);
QUEUE_INIT(&w->watcher_queue);
}
return;
}
#endif
if (QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
if (loop->watchers[w->fd] == NULL) {
loop->watchers[w->fd] = w;
loop->nfds++;
}
}
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
assert(0 != events);
if (w->fd == -1)
return;
assert(w->fd >= 0);
/* Happens when uv__io_stop() is called on a handle that was never started. */
if ((unsigned) w->fd >= loop->nwatchers)
return;
w->pevents &= ~events;
if (w->pevents == 0) {
QUEUE_REMOVE(&w->watcher_queue);
QUEUE_INIT(&w->watcher_queue);
if (loop->watchers[w->fd] != NULL) {
assert(loop->watchers[w->fd] == w);
assert(loop->nfds > 0);
loop->watchers[w->fd] = NULL;
loop->nfds--;
w->events = 0;
}
}
else if (QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
QUEUE_REMOVE(&w->pending_queue);
/* Remove stale events for this file descriptor */
uv__platform_invalidate_fd(loop, w->fd);
}
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
if (QUEUE_EMPTY(&w->pending_queue))
QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}
int uv__io_active(const uv__io_t* w, unsigned int events) {
assert(0 == (events & ~(UV__POLLIN | UV__POLLOUT)));
assert(0 != events);
return 0 != (w->pevents & events);
}
int uv_getrusage(uv_rusage_t* rusage) {
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage))
return -errno;
rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;
rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;
rusage->ru_maxrss = usage.ru_maxrss;
rusage->ru_ixrss = usage.ru_ixrss;
rusage->ru_idrss = usage.ru_idrss;
rusage->ru_isrss = usage.ru_isrss;
rusage->ru_minflt = usage.ru_minflt;
rusage->ru_majflt = usage.ru_majflt;
rusage->ru_nswap = usage.ru_nswap;
rusage->ru_inblock = usage.ru_inblock;
rusage->ru_oublock = usage.ru_oublock;
rusage->ru_msgsnd = usage.ru_msgsnd;
rusage->ru_msgrcv = usage.ru_msgrcv;
rusage->ru_nsignals = usage.ru_nsignals;
rusage->ru_nvcsw = usage.ru_nvcsw;
rusage->ru_nivcsw = usage.ru_nivcsw;
return 0;
}
int uv__open_cloexec(const char* path, int flags) {
int err;
int fd;
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD__ >= 9)
static int no_cloexec;
if (!no_cloexec) {
fd = open(path, flags | UV__O_CLOEXEC);
if (fd != -1)
return fd;
if (errno != EINVAL)
return -errno;
/* O_CLOEXEC not supported. */
no_cloexec = 1;
}
#endif
fd = open(path, flags);
if (fd == -1)
return -errno;
err = uv__cloexec(fd, 1);
if (err) {
uv__close(fd);
return err;
}
return fd;
}
int uv__dup2_cloexec(int oldfd, int newfd) {
int r;
#if defined(__FreeBSD__) && __FreeBSD__ >= 10
do
r = dup3(oldfd, newfd, O_CLOEXEC);
while (r == -1 && errno == EINTR);
if (r == -1)
return -errno;
return r;
#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
do
r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);
while (r == -1 && errno == EINTR);
if (r != -1)
return r;
if (errno != EINVAL)
return -errno;
/* Fall through. */
#elif defined(__linux__)
static int no_dup3;
if (!no_dup3) {
do
r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
while (r == -1 && (errno == EINTR || errno == EBUSY));
if (r != -1)
return r;
if (errno != ENOSYS)
return -errno;
/* Fall through. */
no_dup3 = 1;
}
#endif
{
int err;
do
r = dup2(oldfd, newfd);
#if defined(__linux__)
while (r == -1 && (errno == EINTR || errno == EBUSY));
#else
while (r == -1 && errno == EINTR);
#endif
if (r == -1)
return -errno;
err = uv__cloexec(newfd, 1);
if (err) {
uv__close(newfd);
return err;
}
return r;
}
}

View File

@ -0,0 +1,203 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <dlfcn.h>
#include <errno.h>
#include <stdlib.h>
#include <TargetConditionals.h>
#if !TARGET_OS_IPHONE
# include <CoreFoundation/CoreFoundation.h>
# include <ApplicationServices/ApplicationServices.h>
#endif
static int uv__pthread_setname_np(const char* name) {
int (*dynamic_pthread_setname_np)(const char* name);
char namebuf[64]; /* MAXTHREADNAMESIZE */
int err;
/* pthread_setname_np() first appeared in OS X 10.6 and iOS 3.2. */
dynamic_pthread_setname_np = dlsym(RTLD_DEFAULT, "pthread_setname_np");
if (dynamic_pthread_setname_np == NULL)
return -ENOSYS;
strncpy(namebuf, name, sizeof(namebuf) - 1);
namebuf[sizeof(namebuf) - 1] = '\0';
err = dynamic_pthread_setname_np(namebuf);
if (err)
return -err;
return 0;
}
int uv__set_process_title(const char* title) {
#if TARGET_OS_IPHONE
return uv__pthread_setname_np(title);
#else
CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
const char*,
CFStringEncoding);
CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef);
void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef);
void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef);
CFTypeRef (*pLSGetCurrentApplicationASN)(void);
OSStatus (*pLSSetApplicationInformationItem)(int,
CFTypeRef,
CFStringRef,
CFStringRef,
CFDictionaryRef*);
void* application_services_handle;
void* core_foundation_handle;
CFBundleRef launch_services_bundle;
CFStringRef* display_name_key;
CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef);
CFBundleRef (*pCFBundleGetMainBundle)(void);
CFBundleRef hi_services_bundle;
OSStatus (*pSetApplicationIsDaemon)(int);
CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef);
void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t,
void*);
CFTypeRef asn;
int err;
err = -ENOENT;
application_services_handle = dlopen("/System/Library/Frameworks/"
"ApplicationServices.framework/"
"Versions/A/ApplicationServices",
RTLD_LAZY | RTLD_LOCAL);
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
"Versions/A/CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
if (application_services_handle == NULL || core_foundation_handle == NULL)
goto out;
pCFStringCreateWithCString =
dlsym(core_foundation_handle, "CFStringCreateWithCString");
pCFBundleGetBundleWithIdentifier =
dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier");
pCFBundleGetDataPointerForName =
dlsym(core_foundation_handle, "CFBundleGetDataPointerForName");
pCFBundleGetFunctionPointerForName =
dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName");
if (pCFStringCreateWithCString == NULL ||
pCFBundleGetBundleWithIdentifier == NULL ||
pCFBundleGetDataPointerForName == NULL ||
pCFBundleGetFunctionPointerForName == NULL) {
goto out;
}
#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
launch_services_bundle =
pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices"));
if (launch_services_bundle == NULL)
goto out;
pLSGetCurrentApplicationASN =
pCFBundleGetFunctionPointerForName(launch_services_bundle,
S("_LSGetCurrentApplicationASN"));
if (pLSGetCurrentApplicationASN == NULL)
goto out;
pLSSetApplicationInformationItem =
pCFBundleGetFunctionPointerForName(launch_services_bundle,
S("_LSSetApplicationInformationItem"));
if (pLSSetApplicationInformationItem == NULL)
goto out;
display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle,
S("_kLSDisplayNameKey"));
if (display_name_key == NULL || *display_name_key == NULL)
goto out;
pCFBundleGetInfoDictionary = dlsym(core_foundation_handle,
"CFBundleGetInfoDictionary");
pCFBundleGetMainBundle = dlsym(core_foundation_handle,
"CFBundleGetMainBundle");
if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL)
goto out;
/* Black 10.9 magic, to remove (Not responding) mark in Activity Monitor */
hi_services_bundle =
pCFBundleGetBundleWithIdentifier(S("com.apple.HIServices"));
err = -ENOENT;
if (hi_services_bundle == NULL)
goto out;
pSetApplicationIsDaemon = pCFBundleGetFunctionPointerForName(
hi_services_bundle,
S("SetApplicationIsDaemon"));
pLSApplicationCheckIn = pCFBundleGetFunctionPointerForName(
launch_services_bundle,
S("_LSApplicationCheckIn"));
pLSSetApplicationLaunchServicesServerConnectionStatus =
pCFBundleGetFunctionPointerForName(
launch_services_bundle,
S("_LSSetApplicationLaunchServicesServerConnectionStatus"));
if (pSetApplicationIsDaemon == NULL ||
pLSApplicationCheckIn == NULL ||
pLSSetApplicationLaunchServicesServerConnectionStatus == NULL) {
goto out;
}
if (pSetApplicationIsDaemon(1) != noErr)
goto out;
pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL);
/* Check into process manager?! */
pLSApplicationCheckIn(-2,
pCFBundleGetInfoDictionary(pCFBundleGetMainBundle()));
asn = pLSGetCurrentApplicationASN();
err = -EINVAL;
if (pLSSetApplicationInformationItem(-2, /* Magic value. */
asn,
*display_name_key,
S(title),
NULL) != noErr) {
goto out;
}
uv__pthread_setname_np(title); /* Don't care if it fails. */
err = 0;
out:
if (core_foundation_handle != NULL)
dlclose(core_foundation_handle);
if (application_services_handle != NULL)
dlclose(application_services_handle);
return err;
#endif /* !TARGET_OS_IPHONE */
}

View File

@ -0,0 +1,326 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdint.h>
#include <errno.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach-o/dyld.h> /* _NSGetExecutablePath */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
loop->cf_state = NULL;
if (uv__kqueue_init(loop))
return -errno;
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
uv__fsevents_loop_delete(loop);
}
uint64_t uv__hrtime(uv_clocktype_t type) {
static mach_timebase_info_data_t info;
if ((ACCESS_ONCE(uint32_t, info.numer) == 0 ||
ACCESS_ONCE(uint32_t, info.denom) == 0) &&
mach_timebase_info(&info) != KERN_SUCCESS)
abort();
return mach_absolute_time() * info.numer / info.denom;
}
int uv_exepath(char* buffer, size_t* size) {
uint32_t usize;
int result;
char* path;
char* fullpath;
if (buffer == NULL || size == NULL)
return -EINVAL;
usize = *size;
result = _NSGetExecutablePath(buffer, &usize);
if (result) return result;
path = malloc(2 * PATH_MAX);
fullpath = realpath(buffer, path);
if (fullpath == NULL) {
SAVE_ERRNO(free(path));
return -errno;
}
strncpy(buffer, fullpath, *size);
free(fullpath);
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
vm_statistics_data_t info;
mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t);
if (host_statistics(mach_host_self(), HOST_VM_INFO,
(host_info_t)&info, &count) != KERN_SUCCESS) {
return -EINVAL; /* FIXME(bnoordhuis) Translate error. */
}
return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
uint64_t info;
int which[] = {CTL_HW, HW_MEMSIZE};
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
return (uint64_t) info;
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
int uv_resident_set_memory(size_t* rss) {
mach_msg_type_number_t count;
task_basic_info_data_t info;
kern_return_t err;
count = TASK_BASIC_INFO_COUNT;
err = task_info(mach_task_self(),
TASK_BASIC_INFO,
(task_info_t) &info,
&count);
(void) &err;
/* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
* KERN_SUCCESS implies a libuv bug.
*/
assert(err == KERN_SUCCESS);
*rss = info.resident_size;
return 0;
}
int uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
now = time(NULL);
*uptime = now - info.tv_sec;
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks);
char model[512];
uint64_t cpuspeed;
size_t size;
unsigned int i;
natural_t numcpus;
mach_msg_type_number_t msg_type;
processor_cpu_load_info_data_t *info;
uv_cpu_info_t* cpu_info;
size = sizeof(model);
if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
sysctlbyname("hw.model", &model, &size, NULL, 0)) {
return -errno;
}
size = sizeof(cpuspeed);
if (sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0))
return -errno;
if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
(processor_info_array_t*)&info,
&msg_type) != KERN_SUCCESS) {
return -EINVAL; /* FIXME(bnoordhuis) Translate error. */
}
*cpu_infos = malloc(numcpus * sizeof(**cpu_infos));
if (!(*cpu_infos))
return -ENOMEM; /* FIXME(bnoordhuis) Deallocate info? */
*count = numcpus;
for (i = 0; i < numcpus; i++) {
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier;
cpu_info->cpu_times.irq = 0;
cpu_info->model = strdup(model);
cpu_info->speed = cpuspeed/1000000;
}
vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
return 0;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
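/* Illustrative sketch (not part of libuv): caller-side use of the
 * uv_cpu_info()/uv_free_cpu_info() pair implemented above. Hypothetical
 * helper name; assumes this tree's public uv.h.
 */
#include <stdio.h>
#include "uv.h"

static void example_print_cpus(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;
  if (uv_cpu_info(&cpus, &count) != 0)
    return;
  for (i = 0; i < count; i++)
    printf("%s @ %d MHz\n", cpus[i].model, cpus[i].speed);
  uv_free_cpu_info(cpus, count);  /* Release what uv_cpu_info() allocated. */
}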
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_dl *sa_addr;
if (getifaddrs(&addrs))
return -errno;
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == AF_LINK)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses))
return -ENOMEM;
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
continue;
if (ent->ifa_addr == NULL)
continue;
/*
* On Mac OS X getifaddrs returns information related to Mac Addresses for
* various devices, such as firewire, etc. These are not relevant here.
*/
if (ent->ifa_addr->sa_family == AF_LINK)
continue;
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != AF_LINK)) {
continue;
}
address = *addresses;
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}
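/* Illustrative sketch (not part of libuv): enumerating the interfaces
 * returned by uv_interface_addresses() above. Assumes the uv_ip4_name()/
 * uv_ip6_name() helpers from this tree's public API; the function name is
 * hypothetical.
 */
#include <stdio.h>
#include <netinet/in.h>
#include "uv.h"

static void example_list_interfaces(void) {
  uv_interface_address_t* ifaces;
  char addr[64];
  int count;
  int i;
  if (uv_interface_addresses(&ifaces, &count) != 0)
    return;
  for (i = 0; i < count; i++) {
    if (ifaces[i].address.address4.sin_family == AF_INET)
      uv_ip4_name(&ifaces[i].address.address4, addr, sizeof(addr));
    else
      uv_ip6_name(&ifaces[i].address.address6, addr, sizeof(addr));
    printf("%s%s %s\n",
           ifaces[i].name,
           ifaces[i].is_internal ? " (internal)" : "",
           addr);
  }
  uv_free_interface_addresses(ifaces, count);
}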

View File

@ -0,0 +1,83 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <dlfcn.h>
#include <errno.h>
#include <string.h>
#include <locale.h>
static int uv__dlerror(uv_lib_t* lib);
int uv_dlopen(const char* filename, uv_lib_t* lib) {
dlerror(); /* Reset error status. */
lib->errmsg = NULL;
lib->handle = dlopen(filename, RTLD_LAZY);
return lib->handle ? 0 : uv__dlerror(lib);
}
void uv_dlclose(uv_lib_t* lib) {
if (lib->errmsg) {
free(lib->errmsg);
lib->errmsg = NULL;
}
if (lib->handle) {
/* Ignore errors. No good way to signal them without leaking memory. */
dlclose(lib->handle);
lib->handle = NULL;
}
}
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
dlerror(); /* Reset error status. */
*ptr = dlsym(lib->handle, name);
return uv__dlerror(lib);
}
const char* uv_dlerror(const uv_lib_t* lib) {
return lib->errmsg ? lib->errmsg : "no error";
}
static int uv__dlerror(uv_lib_t* lib) {
const char* errmsg;
if (lib->errmsg)
free(lib->errmsg);
errmsg = dlerror();
if (errmsg) {
lib->errmsg = strdup(errmsg);
return -1;
}
else {
lib->errmsg = NULL;
return 0;
}
}
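/* Illustrative sketch (not part of libuv): caller-side use of the
 * uv_dlopen()/uv_dlsym()/uv_dlclose() wrappers above. The library name and
 * symbol ("./plugin.so", "plugin_entry") are hypothetical.
 */
#include <stdio.h>
#include "uv.h"

static void example_load_plugin(void) {
  uv_lib_t lib;
  void (*entry)(void);
  if (uv_dlopen("./plugin.so", &lib) != 0) {
    fprintf(stderr, "dlopen: %s\n", uv_dlerror(&lib));
    return;
  }
  if (uv_dlsym(&lib, "plugin_entry", (void**) &entry) == 0)
    entry();
  else
    fprintf(stderr, "dlsym: %s\n", uv_dlerror(&lib));
  uv_dlclose(&lib);  /* Also frees any error message held by the handle. */
}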

View File

@ -0,0 +1,425 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <kvm.h>
#include <paths.h>
#include <sys/user.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h> /* VM_LOADAVG */
#include <time.h>
#include <stdlib.h>
#include <unistd.h> /* sysconf */
#include <fcntl.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
#ifndef CPUSTATES
# define CPUSTATES 5U
#endif
#ifndef CP_USER
# define CP_USER 0
# define CP_NICE 1
# define CP_SYS 2
# define CP_IDLE 3
# define CP_INTR 4
#endif
static char *process_title;
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return uv__kqueue_init(loop);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
int uv_exepath(char* buffer, size_t* size) {
int mib[4];
size_t cb;
if (buffer == NULL || size == NULL)
return -EINVAL;
#ifdef __DragonFly__
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_ARGS;
mib[3] = getpid();
#else
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_PATHNAME;
mib[3] = -1;
#endif
cb = *size;
if (sysctl(mib, 4, buffer, &cb, NULL, 0))
return -errno;
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
int freecount;
size_t size = sizeof(freecount);
if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
return -errno;
return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
unsigned long info;
int which[] = {CTL_HW, HW_PHYSMEM};
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
return (uint64_t) info;
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
char** uv_setup_args(int argc, char** argv) {
process_title = argc ? strdup(argv[0]) : NULL;
return argv;
}
int uv_set_process_title(const char* title) {
int oid[4];
if (process_title) free(process_title);
process_title = strdup(title);
oid[0] = CTL_KERN;
oid[1] = KERN_PROC;
oid[2] = KERN_PROC_ARGS;
oid[3] = getpid();
sysctl(oid,
ARRAY_SIZE(oid),
NULL,
NULL,
process_title,
strlen(process_title) + 1);
return 0;
}
int uv_get_process_title(char* buffer, size_t size) {
if (process_title) {
strncpy(buffer, process_title, size);
} else {
if (size > 0) {
buffer[0] = '\0';
}
}
return 0;
}
int uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc *kinfo = NULL;
pid_t pid;
int nprocs;
size_t page_size = getpagesize();
pid = getpid();
kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open");
if (kd == NULL) goto error;
kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, &nprocs);
if (kinfo == NULL) goto error;
#ifdef __DragonFly__
*rss = kinfo->kp_vm_rssize * page_size;
#else
*rss = kinfo->ki_rssize * page_size;
#endif
kvm_close(kd);
return 0;
error:
if (kd) kvm_close(kd);
return -EPERM;
}
int uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus,
cur = 0;
uv_cpu_info_t* cpu_info;
const char* maxcpus_key;
const char* cptimes_key;
char model[512];
long* cp_times;
int numcpus;
size_t size;
int i;
#if defined(__DragonFly__)
/* This is not quite correct but DragonFlyBSD doesn't seem to have anything
* comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total,
* not per CPU). At least this stops uv_cpu_info() from failing completely.
*/
maxcpus_key = "hw.ncpu";
cptimes_key = "kern.cp_time";
#else
maxcpus_key = "kern.smp.maxcpus";
cptimes_key = "kern.cp_times";
#endif
size = sizeof(model);
if (sysctlbyname("hw.model", &model, &size, NULL, 0))
return -errno;
size = sizeof(numcpus);
if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
return -errno;
*cpu_infos = malloc(numcpus * sizeof(**cpu_infos));
if (!(*cpu_infos))
return -ENOMEM;
*count = numcpus;
size = sizeof(cpuspeed);
if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0)) {
SAVE_ERRNO(free(*cpu_infos));
return -errno;
}
/* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of
* ncpu.
*/
size = sizeof(maxcpus);
if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) {
SAVE_ERRNO(free(*cpu_infos));
return -errno;
}
size = maxcpus * CPUSTATES * sizeof(long);
cp_times = malloc(size);
if (cp_times == NULL) {
free(*cpu_infos);
return -ENOMEM;
}
if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) {
SAVE_ERRNO(free(cp_times));
SAVE_ERRNO(free(*cpu_infos));
return -errno;
}
for (i = 0; i < numcpus; i++) {
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
cpu_info->model = strdup(model);
cpu_info->speed = cpuspeed;
cur+=CPUSTATES;
}
free(cp_times);
return 0;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_dl *sa_addr;
if (getifaddrs(&addrs))
return -errno;
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == AF_LINK)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses))
return -ENOMEM;
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
continue;
if (ent->ifa_addr == NULL)
continue;
/*
* On FreeBSD getifaddrs returns information related to the raw underlying
* devices. We're not interested in this information yet.
*/
if (ent->ifa_addr->sa_family == AF_LINK)
continue;
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != AF_LINK)) {
continue;
}
address = *addresses;
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,899 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#if TARGET_OS_IPHONE
/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */
int uv__fsevents_init(uv_fs_event_t* handle) {
return 0;
}
int uv__fsevents_close(uv_fs_event_t* handle) {
return 0;
}
void uv__fsevents_loop_delete(uv_loop_t* loop) {
}
#else /* TARGET_OS_IPHONE */
#include <dlfcn.h>
#include <assert.h>
#include <stdlib.h>
#include <pthread.h>
#include <CoreFoundation/CFRunLoop.h>
#include <CoreServices/CoreServices.h>
/* These are macros to avoid "initializer element is not constant" errors
* with old versions of gcc.
*/
#define kFSEventsModified (kFSEventStreamEventFlagItemFinderInfoMod | \
kFSEventStreamEventFlagItemModified | \
kFSEventStreamEventFlagItemInodeMetaMod | \
kFSEventStreamEventFlagItemChangeOwner | \
kFSEventStreamEventFlagItemXattrMod)
#define kFSEventsRenamed (kFSEventStreamEventFlagItemCreated | \
kFSEventStreamEventFlagItemRemoved | \
kFSEventStreamEventFlagItemRenamed)
#define kFSEventsSystem (kFSEventStreamEventFlagUserDropped | \
kFSEventStreamEventFlagKernelDropped | \
kFSEventStreamEventFlagEventIdsWrapped | \
kFSEventStreamEventFlagHistoryDone | \
kFSEventStreamEventFlagMount | \
kFSEventStreamEventFlagUnmount | \
kFSEventStreamEventFlagRootChanged)
typedef struct uv__fsevents_event_s uv__fsevents_event_t;
typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
typedef struct uv__cf_loop_state_s uv__cf_loop_state_t;
struct uv__cf_loop_signal_s {
QUEUE member;
uv_fs_event_t* handle;
};
struct uv__fsevents_event_s {
QUEUE member;
int events;
char path[1];
};
struct uv__cf_loop_state_s {
CFRunLoopRef loop;
CFRunLoopSourceRef signal_source;
int fsevent_need_reschedule;
FSEventStreamRef fsevent_stream;
uv_sem_t fsevent_sem;
uv_mutex_t fsevent_mutex;
void* fsevent_handles[2];
unsigned int fsevent_handle_count;
};
/* Forward declarations */
static void uv__cf_loop_cb(void* arg);
static void* uv__cf_loop_runner(void* arg);
static int uv__cf_loop_signal(uv_loop_t* loop, uv_fs_event_t* handle);
/* Lazy-loaded by uv__fsevents_global_init(). */
static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef,
const void**,
CFIndex,
const CFArrayCallBacks*);
static void (*pCFRelease)(CFTypeRef);
static void (*pCFRunLoopAddSource)(CFRunLoopRef,
CFRunLoopSourceRef,
CFStringRef);
static CFRunLoopRef (*pCFRunLoopGetCurrent)(void);
static void (*pCFRunLoopRemoveSource)(CFRunLoopRef,
CFRunLoopSourceRef,
CFStringRef);
static void (*pCFRunLoopRun)(void);
static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef,
CFIndex,
CFRunLoopSourceContext*);
static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef);
static void (*pCFRunLoopStop)(CFRunLoopRef);
static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
CFAllocatorRef,
const char*);
static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
static CFStringRef (*pkCFRunLoopDefaultMode);
static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamCallback,
FSEventStreamContext*,
CFArrayRef,
FSEventStreamEventId,
CFTimeInterval,
FSEventStreamCreateFlags);
static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
static void (*pFSEventStreamRelease)(FSEventStreamRef);
static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
CFRunLoopRef,
CFStringRef);
static Boolean (*pFSEventStreamStart)(FSEventStreamRef);
static void (*pFSEventStreamStop)(FSEventStreamRef);
#define UV__FSEVENTS_PROCESS(handle, block) \
do { \
QUEUE events; \
QUEUE* q; \
uv__fsevents_event_t* event; \
int err; \
uv_mutex_lock(&(handle)->cf_mutex); \
/* Split-off all events and empty original queue */ \
QUEUE_INIT(&events); \
if (!QUEUE_EMPTY(&(handle)->cf_events)) { \
q = QUEUE_HEAD(&(handle)->cf_events); \
QUEUE_SPLIT(&(handle)->cf_events, q, &events); \
} \
/* Get error (if any) and zero original one */ \
err = (handle)->cf_error; \
(handle)->cf_error = 0; \
uv_mutex_unlock(&(handle)->cf_mutex); \
/* Loop through events, deallocating each after processing */ \
while (!QUEUE_EMPTY(&events)) { \
q = QUEUE_HEAD(&events); \
event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
QUEUE_REMOVE(q); \
      /* NOTE: Checking uv__is_active() is required here, because the handle \
       * callback may close the handle, and invoking it afterwards would     \
       * lead to incorrect behaviour. */                                      \
if (!uv__is_closing((handle)) && uv__is_active((handle))) \
block \
/* Free allocated data */ \
free(event); \
} \
if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle))) \
(handle)->cb((handle), NULL, 0, err); \
} while (0)
/* Runs in UV loop's thread, when there're events to report to handle */
static void uv__fsevents_cb(uv_async_t* cb) {
uv_fs_event_t* handle;
handle = cb->data;
UV__FSEVENTS_PROCESS(handle, {
handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0);
});
}
/* Runs in CF thread, pushes events onto the handle's event list */
static void uv__fsevents_push_event(uv_fs_event_t* handle,
QUEUE* events,
int err) {
assert(events != NULL || err != 0);
uv_mutex_lock(&handle->cf_mutex);
/* Concatenate two queues */
if (events != NULL)
QUEUE_ADD(&handle->cf_events, events);
/* Propagate error */
if (err != 0)
handle->cf_error = err;
uv_mutex_unlock(&handle->cf_mutex);
uv_async_send(handle->cf_cb);
}
/* Runs in CF thread, when there're events in FSEventStream */
static void uv__fsevents_event_cb(ConstFSEventStreamRef streamRef,
void* info,
size_t numEvents,
void* eventPaths,
const FSEventStreamEventFlags eventFlags[],
const FSEventStreamEventId eventIds[]) {
size_t i;
int len;
char** paths;
char* path;
char* pos;
uv_fs_event_t* handle;
QUEUE* q;
uv_loop_t* loop;
uv__cf_loop_state_t* state;
uv__fsevents_event_t* event;
QUEUE head;
loop = info;
state = loop->cf_state;
assert(state != NULL);
paths = eventPaths;
/* For each handle */
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_FOREACH(q, &state->fsevent_handles) {
handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
QUEUE_INIT(&head);
/* Process and filter out events */
for (i = 0; i < numEvents; i++) {
/* Ignore system events */
if (eventFlags[i] & kFSEventsSystem)
continue;
path = paths[i];
len = strlen(path);
/* Filter out paths that are outside handle's request */
if (strncmp(path, handle->realpath, handle->realpath_len) != 0)
continue;
if (handle->realpath_len > 1 || *handle->realpath != '/') {
path += handle->realpath_len;
len -= handle->realpath_len;
/* Skip forward slash */
if (*path != '\0') {
path++;
len--;
}
}
#ifdef MAC_OS_X_VERSION_10_7
/* Ignore events with path equal to directory itself */
if (len == 0)
continue;
#endif /* MAC_OS_X_VERSION_10_7 */
      /* Do not emit events from subdirectories unless the recursive option is set */
if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != 0) {
pos = strchr(path + 1, '/');
if (pos != NULL)
continue;
}
#ifndef MAC_OS_X_VERSION_10_7
path = "";
len = 0;
#endif /* MAC_OS_X_VERSION_10_7 */
event = malloc(sizeof(*event) + len);
if (event == NULL)
break;
memset(event, 0, sizeof(*event));
memcpy(event->path, path, len + 1);
if ((eventFlags[i] & kFSEventsModified) != 0 &&
(eventFlags[i] & kFSEventsRenamed) == 0)
event->events = UV_CHANGE;
else
event->events = UV_RENAME;
QUEUE_INSERT_TAIL(&head, &event->member);
}
if (!QUEUE_EMPTY(&head))
uv__fsevents_push_event(handle, &head, 0);
}
uv_mutex_unlock(&state->fsevent_mutex);
}
/* Runs in CF thread */
static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
uv__cf_loop_state_t* state;
FSEventStreamContext ctx;
FSEventStreamRef ref;
CFAbsoluteTime latency;
FSEventStreamCreateFlags flags;
/* Initialize context */
ctx.version = 0;
ctx.info = loop;
ctx.retain = NULL;
ctx.release = NULL;
ctx.copyDescription = NULL;
latency = 0.05;
/* Explanation of the selected flags:
* 1. NoDefer - without this flag, events that occur continuously (i.e. each
*    event arrives less than `latency` after the previous one) are deferred
*    and passed to the callback only once they fill the whole OS buffer or
*    once the continuous stream stops (i.e. the gap between events grows
*    bigger than `latency`).
*    Specifying this flag makes the callback fire once `latency` has passed
*    since the event.
* 2. FileEvents - fire the callback for file changes too (by default it
*    fires only for directory changes).
*/
flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents;
/*
* NOTE: It might sound like a good idea to remember the last seen
* StreamEventId, but in reality one watched directory may have a last
* StreamEventId lower than that of another directory that is being watched
* now, which would cause the FSEventStream API to report changes to files
* from the past.
*/
ref = pFSEventStreamCreate(NULL,
&uv__fsevents_event_cb,
&ctx,
paths,
kFSEventStreamEventIdSinceNow,
latency,
flags);
assert(ref != NULL);
state = loop->cf_state;
pFSEventStreamScheduleWithRunLoop(ref,
state->loop,
*pkCFRunLoopDefaultMode);
if (!pFSEventStreamStart(ref)) {
pFSEventStreamInvalidate(ref);
pFSEventStreamRelease(ref);
return -EMFILE;
}
state->fsevent_stream = ref;
return 0;
}
/* Runs in CF thread */
static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
uv__cf_loop_state_t* state;
state = loop->cf_state;
if (state->fsevent_stream == NULL)
return;
/* Flush all accumulated events */
pFSEventStreamFlushSync(state->fsevent_stream);
/* Stop emitting events */
pFSEventStreamStop(state->fsevent_stream);
/* Release stream */
pFSEventStreamInvalidate(state->fsevent_stream);
pFSEventStreamRelease(state->fsevent_stream);
state->fsevent_stream = NULL;
}
/* Runs in the CF thread, when there are new fsevent handles to add to the stream */
static void uv__fsevents_reschedule(uv_fs_event_t* handle) {
uv__cf_loop_state_t* state;
QUEUE* q;
uv_fs_event_t* curr;
CFArrayRef cf_paths;
CFStringRef* paths;
unsigned int i;
int err;
unsigned int path_count;
state = handle->loop->cf_state;
paths = NULL;
cf_paths = NULL;
err = 0;
/* NOTE: `i` is used in deallocation loop below */
i = 0;
/* Optimization to prevent O(n^2) time spent when starting to watch
* many files simultaneously
*/
uv_mutex_lock(&state->fsevent_mutex);
if (state->fsevent_need_reschedule == 0) {
uv_mutex_unlock(&state->fsevent_mutex);
goto final;
}
state->fsevent_need_reschedule = 0;
uv_mutex_unlock(&state->fsevent_mutex);
/* Destroy previous FSEventStream */
uv__fsevents_destroy_stream(handle->loop);
/* Any failure below will be a memory failure */
err = -ENOMEM;
/* Create list of all watched paths */
uv_mutex_lock(&state->fsevent_mutex);
path_count = state->fsevent_handle_count;
if (path_count != 0) {
paths = malloc(sizeof(*paths) * path_count);
if (paths == NULL) {
uv_mutex_unlock(&state->fsevent_mutex);
goto final;
}
q = &state->fsevent_handles;
for (; i < path_count; i++) {
q = QUEUE_NEXT(q);
assert(q != &state->fsevent_handles);
curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
assert(curr->realpath != NULL);
paths[i] =
pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath);
if (paths[i] == NULL) {
uv_mutex_unlock(&state->fsevent_mutex);
goto final;
}
}
}
uv_mutex_unlock(&state->fsevent_mutex);
err = 0;
if (path_count != 0) {
/* Create new FSEventStream */
cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL);
if (cf_paths == NULL) {
err = -ENOMEM;
goto final;
}
err = uv__fsevents_create_stream(handle->loop, cf_paths);
}
final:
/* Deallocate all paths in case of failure */
if (err != 0) {
if (cf_paths == NULL) {
while (i != 0)
pCFRelease(paths[--i]);
free(paths);
} else {
/* CFArray takes ownership of both strings and original C-array */
pCFRelease(cf_paths);
}
/* Broadcast error to all handles */
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_FOREACH(q, &state->fsevent_handles) {
curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
uv__fsevents_push_event(curr, NULL, err);
}
uv_mutex_unlock(&state->fsevent_mutex);
}
/*
* The main thread blocks until the handle has been removed from the list,
* so we must tell it when we're ready.
*
* NOTE: This is coupled with the `uv_sem_wait()` in `uv__fsevents_close`.
*/
if (!uv__is_active(handle))
uv_sem_post(&state->fsevent_sem);
}
static int uv__fsevents_global_init(void) {
static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER;
static void* core_foundation_handle;
static void* core_services_handle;
int err;
err = 0;
pthread_mutex_lock(&global_init_mutex);
if (core_foundation_handle != NULL)
goto out;
/* The libraries are never unloaded because we currently don't have a good
* mechanism for keeping a reference count. It's unlikely to be an issue
* but if it ever becomes one, we can turn the dynamic library handles into
* per-event loop properties and have the dynamic linker keep track for us.
*/
err = -ENOSYS;
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
"Versions/A/CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
if (core_foundation_handle == NULL)
goto out;
core_services_handle = dlopen("/System/Library/Frameworks/"
"CoreServices.framework/"
"Versions/A/CoreServices",
RTLD_LAZY | RTLD_LOCAL);
if (core_services_handle == NULL)
goto out;
err = -ENOENT;
#define V(handle, symbol) \
do { \
p ## symbol = dlsym((handle), #symbol); \
if (p ## symbol == NULL) \
goto out; \
} \
while (0)
V(core_foundation_handle, CFArrayCreate);
V(core_foundation_handle, CFRelease);
V(core_foundation_handle, CFRunLoopAddSource);
V(core_foundation_handle, CFRunLoopGetCurrent);
V(core_foundation_handle, CFRunLoopRemoveSource);
V(core_foundation_handle, CFRunLoopRun);
V(core_foundation_handle, CFRunLoopSourceCreate);
V(core_foundation_handle, CFRunLoopSourceSignal);
V(core_foundation_handle, CFRunLoopStop);
V(core_foundation_handle, CFRunLoopWakeUp);
V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, kCFRunLoopDefaultMode);
V(core_services_handle, FSEventStreamCreate);
V(core_services_handle, FSEventStreamFlushSync);
V(core_services_handle, FSEventStreamInvalidate);
V(core_services_handle, FSEventStreamRelease);
V(core_services_handle, FSEventStreamScheduleWithRunLoop);
V(core_services_handle, FSEventStreamStart);
V(core_services_handle, FSEventStreamStop);
#undef V
err = 0;
out:
if (err && core_services_handle != NULL) {
dlclose(core_services_handle);
core_services_handle = NULL;
}
if (err && core_foundation_handle != NULL) {
dlclose(core_foundation_handle);
core_foundation_handle = NULL;
}
pthread_mutex_unlock(&global_init_mutex);
return err;
}
/* Runs in UV loop */
static int uv__fsevents_loop_init(uv_loop_t* loop) {
CFRunLoopSourceContext ctx;
uv__cf_loop_state_t* state;
pthread_attr_t attr_storage;
pthread_attr_t* attr;
int err;
if (loop->cf_state != NULL)
return 0;
err = uv__fsevents_global_init();
if (err)
return err;
state = calloc(1, sizeof(*state));
if (state == NULL)
return -ENOMEM;
err = uv_mutex_init(&loop->cf_mutex);
if (err)
goto fail_mutex_init;
err = uv_sem_init(&loop->cf_sem, 0);
if (err)
goto fail_sem_init;
QUEUE_INIT(&loop->cf_signals);
err = uv_sem_init(&state->fsevent_sem, 0);
if (err)
goto fail_fsevent_sem_init;
err = uv_mutex_init(&state->fsevent_mutex);
if (err)
goto fail_fsevent_mutex_init;
QUEUE_INIT(&state->fsevent_handles);
state->fsevent_need_reschedule = 0;
state->fsevent_handle_count = 0;
memset(&ctx, 0, sizeof(ctx));
ctx.info = loop;
ctx.perform = uv__cf_loop_cb;
state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx);
if (state->signal_source == NULL) {
err = -ENOMEM;
goto fail_signal_source_create;
}
/* In the unlikely event that pthread_attr_init() fails, create the thread
* with the default stack size. We'll use a little more address space but
* that in itself is not a fatal error.
*/
attr = &attr_storage;
if (pthread_attr_init(attr))
attr = NULL;
if (attr != NULL)
if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN))
abort();
loop->cf_state = state;
/* uv_thread_t is an alias for pthread_t. */
err = -pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop);
if (attr != NULL)
pthread_attr_destroy(attr);
if (err)
goto fail_thread_create;
/* Synchronize threads */
uv_sem_wait(&loop->cf_sem);
return 0;
fail_thread_create:
loop->cf_state = NULL;
fail_signal_source_create:
uv_mutex_destroy(&state->fsevent_mutex);
fail_fsevent_mutex_init:
uv_sem_destroy(&state->fsevent_sem);
fail_fsevent_sem_init:
uv_sem_destroy(&loop->cf_sem);
fail_sem_init:
uv_mutex_destroy(&loop->cf_mutex);
fail_mutex_init:
free(state);
return err;
}
/* Runs in UV loop */
void uv__fsevents_loop_delete(uv_loop_t* loop) {
uv__cf_loop_signal_t* s;
uv__cf_loop_state_t* state;
QUEUE* q;
if (loop->cf_state == NULL)
return;
if (uv__cf_loop_signal(loop, NULL) != 0)
abort();
uv_thread_join(&loop->cf_thread);
uv_sem_destroy(&loop->cf_sem);
uv_mutex_destroy(&loop->cf_mutex);
/* Free any remaining data */
while (!QUEUE_EMPTY(&loop->cf_signals)) {
q = QUEUE_HEAD(&loop->cf_signals);
s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
QUEUE_REMOVE(q);
free(s);
}
/* Destroy state */
state = loop->cf_state;
uv_sem_destroy(&state->fsevent_sem);
uv_mutex_destroy(&state->fsevent_mutex);
pCFRelease(state->signal_source);
free(state);
loop->cf_state = NULL;
}
/* Runs in CF thread. This is the CF loop's body */
static void* uv__cf_loop_runner(void* arg) {
uv_loop_t* loop;
uv__cf_loop_state_t* state;
loop = arg;
state = loop->cf_state;
state->loop = pCFRunLoopGetCurrent();
pCFRunLoopAddSource(state->loop,
state->signal_source,
*pkCFRunLoopDefaultMode);
uv_sem_post(&loop->cf_sem);
pCFRunLoopRun();
pCFRunLoopRemoveSource(state->loop,
state->signal_source,
*pkCFRunLoopDefaultMode);
return NULL;
}
/* Runs in CF thread, executed after `uv__cf_loop_signal()` */
static void uv__cf_loop_cb(void* arg) {
uv_loop_t* loop;
uv__cf_loop_state_t* state;
QUEUE* item;
QUEUE split_head;
uv__cf_loop_signal_t* s;
loop = arg;
state = loop->cf_state;
QUEUE_INIT(&split_head);
uv_mutex_lock(&loop->cf_mutex);
if (!QUEUE_EMPTY(&loop->cf_signals)) {
QUEUE* split_pos = QUEUE_HEAD(&loop->cf_signals);
QUEUE_SPLIT(&loop->cf_signals, split_pos, &split_head);
}
uv_mutex_unlock(&loop->cf_mutex);
while (!QUEUE_EMPTY(&split_head)) {
item = QUEUE_HEAD(&split_head);
s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
/* This was a termination signal */
if (s->handle == NULL)
pCFRunLoopStop(state->loop);
else
uv__fsevents_reschedule(s->handle);
QUEUE_REMOVE(item);
free(s);
}
}
/* Runs in UV loop to notify CF thread */
int uv__cf_loop_signal(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__cf_loop_signal_t* item;
uv__cf_loop_state_t* state;
item = malloc(sizeof(*item));
if (item == NULL)
return -ENOMEM;
item->handle = handle;
uv_mutex_lock(&loop->cf_mutex);
QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
uv_mutex_unlock(&loop->cf_mutex);
state = loop->cf_state;
assert(state != NULL);
pCFRunLoopSourceSignal(state->signal_source);
pCFRunLoopWakeUp(state->loop);
return 0;
}
/* Runs in UV loop to initialize handle */
int uv__fsevents_init(uv_fs_event_t* handle) {
int err;
uv__cf_loop_state_t* state;
err = uv__fsevents_loop_init(handle->loop);
if (err)
return err;
/* Get absolute path to file */
handle->realpath = realpath(handle->path, NULL);
if (handle->realpath == NULL)
return -errno;
handle->realpath_len = strlen(handle->realpath);
/* Initialize event queue */
QUEUE_INIT(&handle->cf_events);
handle->cf_error = 0;
/*
* Events will occur on another thread.
* Initialize an async callback to get them back onto the event loop's thread.
*/
handle->cf_cb = malloc(sizeof(*handle->cf_cb));
if (handle->cf_cb == NULL) {
err = -ENOMEM;
goto fail_cf_cb_malloc;
}
handle->cf_cb->data = handle;
uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
handle->cf_cb->flags |= UV__HANDLE_INTERNAL;
uv_unref((uv_handle_t*) handle->cf_cb);
err = uv_mutex_init(&handle->cf_mutex);
if (err)
goto fail_cf_mutex_init;
/* Insert handle into the list */
state = handle->loop->cf_state;
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
state->fsevent_handle_count++;
state->fsevent_need_reschedule = 1;
uv_mutex_unlock(&state->fsevent_mutex);
/* Reschedule FSEventStream */
assert(handle != NULL);
err = uv__cf_loop_signal(handle->loop, handle);
if (err)
goto fail_loop_signal;
return 0;
fail_loop_signal:
uv_mutex_destroy(&handle->cf_mutex);
fail_cf_mutex_init:
free(handle->cf_cb);
handle->cf_cb = NULL;
fail_cf_cb_malloc:
free(handle->realpath);
handle->realpath = NULL;
handle->realpath_len = 0;
return err;
}
/* Runs in UV loop to de-initialize handle */
int uv__fsevents_close(uv_fs_event_t* handle) {
int err;
uv__cf_loop_state_t* state;
if (handle->cf_cb == NULL)
return -EINVAL;
/* Remove handle from the list */
state = handle->loop->cf_state;
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_REMOVE(&handle->cf_member);
state->fsevent_handle_count--;
state->fsevent_need_reschedule = 1;
uv_mutex_unlock(&state->fsevent_mutex);
/* Reschedule FSEventStream */
assert(handle != NULL);
err = uv__cf_loop_signal(handle->loop, handle);
if (err)
return -err;
/* Wait for deinitialization */
uv_sem_wait(&state->fsevent_sem);
uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) free);
handle->cf_cb = NULL;
/* Free data in queue */
UV__FSEVENTS_PROCESS(handle, {
/* NOP */
});
uv_mutex_destroy(&handle->cf_mutex);
free(handle->realpath);
handle->realpath = NULL;
handle->realpath_len = 0;
return 0;
}
#endif /* TARGET_OS_IPHONE */
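
For orientation, a minimal sketch of how a caller drives this backend through the public uv_fs_event API: uv_fs_event_init() and uv_fs_event_start() are defined later in this listing, and the callback arguments mirror the handle->cb(handle, path, events, status) invocations above. The watched path and the surrounding loop setup are illustrative assumptions, not part of this commit.

/* Illustrative sketch only; error handling abbreviated. */
#include <stdio.h>
#include "uv.h"

static void on_fs_event(uv_fs_event_t* handle,
                        const char* filename,
                        int events,
                        int status) {
  if (status != 0)
    return;
  printf("%s: %s\n",
         (events & UV_RENAME) ? "rename" : "change",
         filename != NULL ? filename : "(watched directory itself)");
}

static uv_fs_event_t watcher;  /* must outlive the event loop iteration */

int watch_directory(uv_loop_t* loop, const char* path) {
  int err;
  err = uv_fs_event_init(loop, &watcher);
  if (err == 0)
    err = uv_fs_event_start(&watcher, on_fs_event, path, 0);
  return err;  /* negative libuv error code on failure */
}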

View File

@ -0,0 +1,192 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stddef.h> /* NULL */
#include <stdlib.h>
#include <string.h>
int uv__getaddrinfo_translate_error(int sys_err) {
switch (sys_err) {
case 0: return 0;
#if defined(EAI_ADDRFAMILY)
case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY;
#endif
#if defined(EAI_AGAIN)
case EAI_AGAIN: return UV_EAI_AGAIN;
#endif
#if defined(EAI_BADFLAGS)
case EAI_BADFLAGS: return UV_EAI_BADFLAGS;
#endif
#if defined(EAI_BADHINTS)
case EAI_BADHINTS: return UV_EAI_BADHINTS;
#endif
#if defined(EAI_CANCELED)
case EAI_CANCELED: return UV_EAI_CANCELED;
#endif
#if defined(EAI_FAIL)
case EAI_FAIL: return UV_EAI_FAIL;
#endif
#if defined(EAI_FAMILY)
case EAI_FAMILY: return UV_EAI_FAMILY;
#endif
#if defined(EAI_MEMORY)
case EAI_MEMORY: return UV_EAI_MEMORY;
#endif
#if defined(EAI_NODATA)
case EAI_NODATA: return UV_EAI_NODATA;
#endif
#if defined(EAI_NONAME)
# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME
case EAI_NONAME: return UV_EAI_NONAME;
# endif
#endif
#if defined(EAI_OVERFLOW)
case EAI_OVERFLOW: return UV_EAI_OVERFLOW;
#endif
#if defined(EAI_PROTOCOL)
case EAI_PROTOCOL: return UV_EAI_PROTOCOL;
#endif
#if defined(EAI_SERVICE)
case EAI_SERVICE: return UV_EAI_SERVICE;
#endif
#if defined(EAI_SOCKTYPE)
case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE;
#endif
#if defined(EAI_SYSTEM)
case EAI_SYSTEM: return -errno;
#endif
}
assert(!"unknown EAI_* error code");
abort();
return 0; /* Pacify compiler. */
}
static void uv__getaddrinfo_work(struct uv__work* w) {
uv_getaddrinfo_t* req;
int err;
req = container_of(w, uv_getaddrinfo_t, work_req);
err = getaddrinfo(req->hostname, req->service, req->hints, &req->res);
req->retcode = uv__getaddrinfo_translate_error(err);
}
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
uv_getaddrinfo_t* req;
struct addrinfo *res;
req = container_of(w, uv_getaddrinfo_t, work_req);
uv__req_unregister(req->loop, req);
res = req->res;
req->res = NULL;
/* See initialization in uv_getaddrinfo(). */
if (req->hints)
free(req->hints);
else if (req->service)
free(req->service);
else if (req->hostname)
free(req->hostname);
else
assert(0);
req->hints = NULL;
req->service = NULL;
req->hostname = NULL;
if (status == -ECANCELED) {
assert(req->retcode == 0);
req->retcode = UV_EAI_CANCELED;
}
req->cb(req, req->retcode, res);
}
int uv_getaddrinfo(uv_loop_t* loop,
uv_getaddrinfo_t* req,
uv_getaddrinfo_cb cb,
const char* hostname,
const char* service,
const struct addrinfo* hints) {
size_t hostname_len;
size_t service_len;
size_t hints_len;
size_t len;
char* buf;
if (req == NULL || cb == NULL || (hostname == NULL && service == NULL))
return -EINVAL;
hostname_len = hostname ? strlen(hostname) + 1 : 0;
service_len = service ? strlen(service) + 1 : 0;
hints_len = hints ? sizeof(*hints) : 0;
buf = malloc(hostname_len + service_len + hints_len);
if (buf == NULL)
return -ENOMEM;
uv__req_init(loop, req, UV_GETADDRINFO);
req->loop = loop;
req->cb = cb;
req->res = NULL;
req->hints = NULL;
req->service = NULL;
req->hostname = NULL;
req->retcode = 0;
/* order matters, see uv__getaddrinfo_done() */
len = 0;
if (hints) {
req->hints = memcpy(buf + len, hints, sizeof(*hints));
len += sizeof(*hints);
}
if (service) {
req->service = memcpy(buf + len, service, service_len);
len += service_len;
}
if (hostname) {
req->hostname = memcpy(buf + len, hostname, hostname_len);
len += hostname_len;
}
uv__work_submit(loop,
&req->work_req,
uv__getaddrinfo_work,
uv__getaddrinfo_done);
return 0;
}
void uv_freeaddrinfo(struct addrinfo* ai) {
if (ai)
freeaddrinfo(ai);
}
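
A minimal caller sketch for the request path above (illustrative only): the argument order matches uv_getaddrinfo(), and the callback parameters follow the req->cb(req, req->retcode, res) call in uv__getaddrinfo_done(). The hostname, service, and the uv_strerror()/uv_ip4_name() helpers are assumptions about the surrounding public libuv API.

#include <stdio.h>
#include <string.h>
#include "uv.h"

static void on_resolved(uv_getaddrinfo_t* req,
                        int status,
                        struct addrinfo* res) {
  char ip[64];
  if (status < 0) {
    fprintf(stderr, "getaddrinfo: %s\n", uv_strerror(status));
    return;
  }
  uv_ip4_name((struct sockaddr_in*) res->ai_addr, ip, sizeof(ip));
  printf("resolved to %s\n", ip);
  uv_freeaddrinfo(res);  /* caller owns the result list */
}

static uv_getaddrinfo_t resolve_req;  /* must stay alive until the callback */

int resolve_example(uv_loop_t* loop) {
  struct addrinfo hints;
  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;
  return uv_getaddrinfo(loop, &resolve_req, on_resolved,
                        "localhost", "80", &hints);
}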

View File

@ -0,0 +1,114 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
static void uv__getnameinfo_work(struct uv__work* w) {
uv_getnameinfo_t* req;
int err;
socklen_t salen;
req = container_of(w, uv_getnameinfo_t, work_req);
if (req->storage.ss_family == AF_INET)
salen = sizeof(struct sockaddr_in);
else if (req->storage.ss_family == AF_INET6)
salen = sizeof(struct sockaddr_in6);
else
abort();
err = getnameinfo((struct sockaddr*) &req->storage,
salen,
req->host,
sizeof(req->host),
req->service,
sizeof(req->service),
req->flags);
req->retcode = uv__getaddrinfo_translate_error(err);
}
static void uv__getnameinfo_done(struct uv__work* w, int status) {
uv_getnameinfo_t* req;
char* host;
char* service;
req = container_of(w, uv_getnameinfo_t, work_req);
uv__req_unregister(req->loop, req);
host = service = NULL;
if (status == -ECANCELED) {
assert(req->retcode == 0);
req->retcode = UV_EAI_CANCELED;
} else if (req->retcode == 0) {
host = req->host;
service = req->service;
}
req->getnameinfo_cb(req, req->retcode, host, service);
}
/*
* Entry point for getnameinfo.
* Returns 0 if a callback will be made;
* returns an error code if validation fails.
*/
int uv_getnameinfo(uv_loop_t* loop,
uv_getnameinfo_t* req,
uv_getnameinfo_cb getnameinfo_cb,
const struct sockaddr* addr,
int flags) {
if (req == NULL || getnameinfo_cb == NULL || addr == NULL)
return UV_EINVAL;
if (addr->sa_family == AF_INET) {
memcpy(&req->storage,
addr,
sizeof(struct sockaddr_in));
} else if (addr->sa_family == AF_INET6) {
memcpy(&req->storage,
addr,
sizeof(struct sockaddr_in6));
} else {
return UV_EINVAL;
}
uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO);
req->getnameinfo_cb = getnameinfo_cb;
req->flags = flags;
req->type = UV_GETNAMEINFO;
req->loop = loop;
req->retcode = 0;
uv__work_submit(loop,
&req->work_req,
uv__getnameinfo_work,
uv__getnameinfo_done);
return 0;
}
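
For symmetry, a minimal reverse-lookup sketch (illustrative only): the callback parameters follow the req->getnameinfo_cb(req, retcode, host, service) call in uv__getnameinfo_done() above. The address literal and the three-argument uv_ip4_addr() helper are assumptions about the surrounding public API.

#include <stdio.h>
#include "uv.h"

static void on_nameinfo(uv_getnameinfo_t* req,
                        int status,
                        const char* hostname,
                        const char* service) {
  if (status == 0)
    printf("%s:%s\n", hostname, service);
}

static uv_getnameinfo_t name_req;  /* must stay alive until the callback */

int reverse_lookup_example(uv_loop_t* loop) {
  struct sockaddr_in addr;
  int err;
  err = uv_ip4_addr("127.0.0.1", 80, &addr);
  if (err == 0)
    err = uv_getnameinfo(loop, &name_req, on_nameinfo,
                         (const struct sockaddr*) &addr, 0);
  return err;
}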

View File

@ -0,0 +1,317 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_UNIX_INTERNAL_H_
#define UV_UNIX_INTERNAL_H_
#include "uv-common.h"
#include <assert.h>
#include <stdlib.h> /* abort */
#include <string.h> /* strrchr */
#include <fcntl.h> /* O_CLOEXEC, may be */
#if defined(__STRICT_ANSI__)
# define inline __inline
#endif
#if defined(__linux__)
# include "linux-syscalls.h"
#endif /* __linux__ */
#if defined(__sun)
# include <sys/port.h>
# include <port.h>
#endif /* __sun */
#if defined(_AIX)
#define reqevents events
#define rtnevents revents
#include <sys/poll.h>
#endif /* _AIX */
#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <CoreServices/CoreServices.h>
#endif
#define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
#define ACCESS_ONCE(type, var) \
(*(volatile type*) &(var))
#define UNREACHABLE() \
do { \
assert(0 && "unreachable code"); \
abort(); \
} \
while (0)
#define SAVE_ERRNO(block) \
do { \
int _saved_errno = errno; \
do { block; } while (0); \
errno = _saved_errno; \
} \
while (0)
/* The __clang__ and __INTEL_COMPILER checks are superfluous because they
* define __GNUC__. They are here to convey to you, dear reader, that these
* macros are enabled when compiling with clang or icc.
*/
#if defined(__clang__) || \
defined(__GNUC__) || \
defined(__INTEL_COMPILER) || \
defined(__SUNPRO_C)
# define UV_DESTRUCTOR(declaration) __attribute__((destructor)) declaration
# define UV_UNUSED(declaration) __attribute__((unused)) declaration
#else
# define UV_DESTRUCTOR(declaration) declaration
# define UV_UNUSED(declaration) declaration
#endif
#if defined(__linux__)
# define UV__POLLIN UV__EPOLLIN
# define UV__POLLOUT UV__EPOLLOUT
# define UV__POLLERR UV__EPOLLERR
# define UV__POLLHUP UV__EPOLLHUP
#endif
#if defined(__sun) || defined(_AIX)
# define UV__POLLIN POLLIN
# define UV__POLLOUT POLLOUT
# define UV__POLLERR POLLERR
# define UV__POLLHUP POLLHUP
#endif
#ifndef UV__POLLIN
# define UV__POLLIN 1
#endif
#ifndef UV__POLLOUT
# define UV__POLLOUT 2
#endif
#ifndef UV__POLLERR
# define UV__POLLERR 4
#endif
#ifndef UV__POLLHUP
# define UV__POLLHUP 8
#endif
#if !defined(O_CLOEXEC) && defined(__FreeBSD__)
/*
* It may be that we are just missing `__POSIX_VISIBLE >= 200809`.
* Try using the fixed constant value and give up if it doesn't work.
*/
# define O_CLOEXEC 0x00100000
#endif
typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
/* handle flags */
enum {
UV_CLOSING = 0x01, /* uv_close() called but not finished. */
UV_CLOSED = 0x02, /* close(2) finished. */
UV_STREAM_READING = 0x04, /* uv_read_start() called. */
UV_STREAM_SHUTTING = 0x08, /* uv_shutdown() called but not complete. */
UV_STREAM_SHUT = 0x10, /* Write side closed. */
UV_STREAM_READABLE = 0x20, /* The stream is readable */
UV_STREAM_WRITABLE = 0x40, /* The stream is writable */
UV_STREAM_BLOCKING = 0x80, /* Synchronous writes. */
UV_STREAM_READ_PARTIAL = 0x100, /* read(2) read less than requested. */
UV_STREAM_READ_EOF = 0x200, /* read(2) read EOF. */
UV_TCP_NODELAY = 0x400, /* Disable Nagle. */
UV_TCP_KEEPALIVE = 0x800, /* Turn on keep-alive. */
UV_TCP_SINGLE_ACCEPT = 0x1000, /* Only accept() when idle. */
UV_HANDLE_IPV6 = 0x2000 /* Handle is bound to a IPv6 socket. */
};
typedef enum {
UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */
UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */
} uv_clocktype_t;
struct uv__stream_queued_fds_s {
unsigned int size;
unsigned int offset;
int fds[1];
};
/* core */
int uv__nonblock(int fd, int set);
int uv__close(int fd);
int uv__cloexec(int fd, int set);
int uv__socket(int domain, int type, int protocol);
int uv__dup(int fd);
ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags);
void uv__make_close_pending(uv_handle_t* handle);
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_close(uv_loop_t* loop, uv__io_t* w);
void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
int uv__io_active(const uv__io_t* w, unsigned int events);
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
/* async */
void uv__async_send(struct uv__async* wa);
void uv__async_init(struct uv__async* wa);
int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb);
void uv__async_stop(uv_loop_t* loop, struct uv__async* wa);
/* loop */
void uv__run_idle(uv_loop_t* loop);
void uv__run_check(uv_loop_t* loop);
void uv__run_prepare(uv_loop_t* loop);
/* stream */
void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
uv_handle_type type);
int uv__stream_open(uv_stream_t*, int fd, int flags);
void uv__stream_destroy(uv_stream_t* stream);
#if defined(__APPLE__)
int uv__stream_try_select(uv_stream_t* stream, int* fd);
#endif /* defined(__APPLE__) */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv__accept(int sockfd);
int uv__dup2_cloexec(int oldfd, int newfd);
int uv__open_cloexec(const char* path, int flags);
/* tcp */
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_nodelay(int fd, int on);
int uv__tcp_keepalive(int fd, int on, unsigned int delay);
/* pipe */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
/* timer */
void uv__run_timers(uv_loop_t* loop);
int uv__next_timeout(const uv_loop_t* loop);
/* signal */
void uv__signal_close(uv_signal_t* handle);
void uv__signal_global_once_init(void);
void uv__signal_loop_cleanup(uv_loop_t* loop);
/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop, int default_loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
/* various */
void uv__async_close(uv_async_t* handle);
void uv__check_close(uv_check_t* handle);
void uv__fs_event_close(uv_fs_event_t* handle);
void uv__idle_close(uv_idle_t* handle);
void uv__pipe_close(uv_pipe_t* handle);
void uv__poll_close(uv_poll_t* handle);
void uv__prepare_close(uv_prepare_t* handle);
void uv__process_close(uv_process_t* handle);
void uv__stream_close(uv_stream_t* handle);
void uv__tcp_close(uv_tcp_t* handle);
void uv__timer_close(uv_timer_t* handle);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
uv_handle_type uv__handle_type(int fd);
#if defined(__APPLE__)
int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
#else
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */
#ifdef UV__O_NONBLOCK
# define UV__F_NONBLOCK UV__O_NONBLOCK
#else
# define UV__F_NONBLOCK 1
#endif
int uv__make_socketpair(int fds[2], int flags);
int uv__make_pipe(int fds[2], int flags);
#if defined(__APPLE__)
int uv__fsevents_init(uv_fs_event_t* handle);
int uv__fsevents_close(uv_fs_event_t* handle);
void uv__fsevents_loop_delete(uv_loop_t* loop);
/* OS X < 10.7 has no file events; polyfill them. */
#ifndef MAC_OS_X_VERSION_10_7
static const int kFSEventStreamCreateFlagFileEvents = 0x00000010;
static const int kFSEventStreamEventFlagItemCreated = 0x00000100;
static const int kFSEventStreamEventFlagItemRemoved = 0x00000200;
static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400;
static const int kFSEventStreamEventFlagItemRenamed = 0x00000800;
static const int kFSEventStreamEventFlagItemModified = 0x00001000;
static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000;
static const int kFSEventStreamEventFlagItemChangeOwner = 0x00004000;
static const int kFSEventStreamEventFlagItemXattrMod = 0x00008000;
static const int kFSEventStreamEventFlagItemIsFile = 0x00010000;
static const int kFSEventStreamEventFlagItemIsDir = 0x00020000;
static const int kFSEventStreamEventFlagItemIsSymlink = 0x00040000;
#endif /* MAC_OS_X_VERSION_10_7 */
#endif /* defined(__APPLE__) */
UV_UNUSED(static void uv__req_init(uv_loop_t* loop,
uv_req_t* req,
uv_req_type type)) {
req->type = type;
uv__req_register(loop, req);
}
#define uv__req_init(loop, req, type) \
uv__req_init((loop), (uv_req_t*)(req), (type))
UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
/* Use a fast time source if available. We only need millisecond precision.
*/
loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;
}
UV_UNUSED(static char* uv__basename_r(const char* path)) {
char* s;
s = strrchr(path, '/');
if (s == NULL)
return (char*) path;
return s + 1;
}
#ifdef HAVE_DTRACE
#include "uv-dtrace.h"
#else
#define UV_TICK_START(arg0, arg1)
#define UV_TICK_STOP(arg0, arg1)
#endif
#endif /* UV_UNIX_INTERNAL_H_ */
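
Two of the helpers above are easy to misread, so here is a small usage sketch (illustrative only, not taken from this tree): SAVE_ERRNO runs a statement while preserving errno, and UNREACHABLE() documents paths that must never execute. The function names below are invented for the example.

/* Sketch only; assumes <errno.h> and <unistd.h> are in scope. */
static void example_close_keep_errno(int fd) {
  /* A failing close() inside the block cannot clobber the caller's errno. */
  SAVE_ERRNO(close(fd));
}

static int example_dispatch(int op) {
  switch (op) {
    case 0: return 0;
    case 1: return 1;
    default: UNREACHABLE();  /* asserts in debug builds, then calls abort() */
  }
}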

View File

@ -0,0 +1,403 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
int uv__kqueue_init(uv_loop_t* loop) {
loop->backend_fd = kqueue();
if (loop->backend_fd == -1)
return -errno;
uv__cloexec(loop->backend_fd, 1);
return 0;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct kevent events[1024];
struct kevent* ev;
struct timespec spec;
unsigned int nevents;
unsigned int revents;
QUEUE* q;
uint64_t base;
uint64_t diff;
uv__io_t* w;
int filter;
int fflags;
int count;
int nfds;
int fd;
int op;
int i;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
nevents = 0;
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
filter = EVFILT_READ;
fflags = 0;
op = EV_ADD;
if (w->cb == uv__fs_event) {
filter = EVFILT_VNODE;
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
}
EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
abort();
nevents = 0;
}
}
if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
abort();
nevents = 0;
}
}
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;; nevents = 0) {
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
}
nfds = kevent(loop->backend_fd,
events,
nevents,
events,
ARRAY_SIZE(events),
timeout == -1 ? NULL : &spec);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
if (timeout == 0)
return;
if (timeout == -1)
continue;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
ev = events + i;
fd = ev->ident;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it. */
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != EBADF && errno != ENOENT)
abort();
continue;
}
if (ev->filter == EVFILT_VNODE) {
assert(w->events == UV__POLLIN);
assert(w->pevents == UV__POLLIN);
w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
nevents++;
continue;
}
revents = 0;
if (ev->filter == EVFILT_READ) {
if (w->pevents & UV__POLLIN) {
revents |= UV__POLLIN;
w->rcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
}
if (ev->filter == EVFILT_WRITE) {
if (w->pevents & UV__POLLOUT) {
revents |= UV__POLLOUT;
w->wcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
}
if (ev->flags & EV_ERROR)
revents |= UV__POLLERR;
if (revents == 0)
continue;
w->cb(loop, w, revents);
nevents++;
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
timeout -= diff;
}
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct kevent* events;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct kevent*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events == NULL)
return;
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].ident == fd)
events[i].ident = -1;
}
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
uv_fs_event_t* handle;
struct kevent ev;
int events;
const char* path;
#if defined(F_GETPATH)
/* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
char pathbuf[MAXPATHLEN];
#endif
handle = container_of(w, uv_fs_event_t, event_watcher);
if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
events = UV_CHANGE;
else
events = UV_RENAME;
path = NULL;
#if defined(F_GETPATH)
/* Also works when the file has been unlinked from the file system. Passing
* in the path when the file has been deleted is arguably a little strange
* but it's consistent with what the inotify backend does.
*/
if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
path = uv__basename_r(pathbuf);
#endif
handle->cb(handle, path, events, 0);
if (handle->event_watcher.fd == -1)
return;
/* Watcher operates in one-shot mode, re-arm it. */
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
abort();
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* path,
unsigned int flags) {
#if defined(__APPLE__)
struct stat statbuf;
#endif /* defined(__APPLE__) */
int fd;
if (uv__is_active(handle))
return -EINVAL;
/* TODO open asynchronously - but how do we report back errors? */
fd = open(path, O_RDONLY);
if (fd == -1)
return -errno;
uv__handle_start(handle);
uv__io_init(&handle->event_watcher, uv__fs_event, fd);
handle->path = strdup(path);
handle->cb = cb;
#if defined(__APPLE__)
/* Nullify field to perform checks later */
handle->cf_cb = NULL;
handle->realpath = NULL;
handle->realpath_len = 0;
handle->cf_flags = flags;
if (fstat(fd, &statbuf))
goto fallback;
/* FSEvents works only with directories */
if (!(statbuf.st_mode & S_IFDIR))
goto fallback;
return uv__fsevents_init(handle);
fallback:
#endif /* defined(__APPLE__) */
uv__io_start(handle->loop, &handle->event_watcher, UV__POLLIN);
return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
if (!uv__is_active(handle))
return 0;
uv__handle_stop(handle);
#if defined(__APPLE__)
if (uv__fsevents_close(handle))
#endif /* defined(__APPLE__) */
{
uv__io_close(handle->loop, &handle->event_watcher);
}
free(handle->path);
handle->path = NULL;
uv__close(handle->event_watcher.fd);
handle->event_watcher.fd = -1;
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
}
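
The EVFILT_VNODE path in uv__io_poll() registers the watched descriptor with EV_ONESHOT, and uv__fs_event() re-arms it after every callback. Below is a standalone sketch of that kernel-level pattern using plain kqueue(2), without any libuv types; the path argument and single-shot structure are illustrative assumptions.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int watch_path_once(const char* path) {
  struct kevent change;
  struct kevent result;
  int kq;
  int fd;
  kq = kqueue();
  fd = open(path, O_RDONLY);
  if (kq == -1 || fd == -1)
    return -1;
  /* One-shot registration with the same fflags uv__io_poll() uses. */
  EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT,
         NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME | NOTE_DELETE |
         NOTE_EXTEND | NOTE_REVOKE, 0, 0);
  /* Register the change and block for one event in a single kevent() call. */
  if (kevent(kq, &change, 1, &result, 1, NULL) < 1)
    return -1;
  printf("fflags=0x%x on fd %d\n",
         (unsigned) result.fflags, (int) result.ident);
  /* A long-lived watcher would EV_SET + kevent() again here to re-arm. */
  close(fd);
  close(kq);
  return 0;
}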

View File

@ -0,0 +1,827 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <net/if.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#define HAVE_IFADDRS_H 1
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 || __UCLIBC_MINOR__ < 9 || __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
# endif
#endif
#ifdef HAVE_IFADDRS_H
# if defined(__ANDROID__)
# include "android-ifaddrs.h"
# else
# include <ifaddrs.h>
# endif
# include <sys/socket.h>
# include <net/ethernet.h>
# include <linux/if_packet.h>
#endif /* HAVE_IFADDRS_H */
/* Available from 2.6.32 onwards. */
#ifndef CLOCK_MONOTONIC_COARSE
# define CLOCK_MONOTONIC_COARSE 6
#endif
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
* include that file because it conflicts with <time.h>. We'll just have to
* define it ourselves.
*/
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(unsigned int numcpus, uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static unsigned long read_cpufreq(unsigned int cpunum);
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
int fd;
fd = uv__epoll_create1(UV__EPOLL_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the EPOLL_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = uv__epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
}
loop->backend_fd = fd;
loop->inotify_fd = -1;
loop->inotify_watchers = NULL;
if (fd == -1)
return -errno;
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->inotify_fd == -1) return;
uv__io_stop(loop, &loop->inotify_read_watcher, UV__POLLIN);
uv__close(loop->inotify_fd);
loop->inotify_fd = -1;
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct uv__epoll_event* events;
struct uv__epoll_event dummy;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events != NULL)
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].data == fd)
events[i].data = -1;
/* Remove the file descriptor from the epoll.
* This avoids a problem where the same file description remains open
* in another process, causing repeated junk epoll events.
*
* We pass in a dummy epoll_event, to work around a bug in old kernels.
*/
if (loop->backend_fd >= 0)
uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy);
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct uv__epoll_event events[1024];
struct uv__epoll_event* pe;
struct uv__epoll_event e;
QUEUE* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
int nevents;
int count;
int nfds;
int fd;
int op;
int i;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data = w->fd;
if (w->events == 0)
op = UV__EPOLL_CTL_ADD;
else
op = UV__EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == UV__EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
nfds = uv__epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
continue;
}
/* Give users only events they're interested in. This prevents spurious
* callbacks when a previous callback invocation in this loop has stopped
* the current watcher, and it also filters out events that the user has
* not asked us to watch.
*/
pe->events &= w->pevents | UV__POLLERR | UV__POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == UV__EPOLLERR || pe->events == UV__EPOLLHUP)
pe->events |= w->pevents & (UV__EPOLLIN | UV__EPOLLOUT);
if (pe->events != 0) {
w->cb(loop, w, pe->events);
nevents++;
}
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
timeout -= diff;
}
}
uint64_t uv__hrtime(uv_clocktype_t type) {
static clock_t fast_clock_id = -1;
struct timespec t;
clock_t clock_id;
/* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
* millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
* serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
* decide to make a costly system call.
*/
/* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
* when it has microsecond granularity or better (unlikely).
*/
if (type == UV_CLOCK_FAST && fast_clock_id == -1) {
if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 &&
t.tv_nsec <= 1 * 1000 * 1000) {
fast_clock_id = CLOCK_MONOTONIC_COARSE;
} else {
fast_clock_id = CLOCK_MONOTONIC;
}
}
clock_id = CLOCK_MONOTONIC;
if (type == UV_CLOCK_FAST)
clock_id = fast_clock_id;
if (clock_gettime(clock_id, &t))
return 0; /* Not really possible. */
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}
void uv_loadavg(double avg[3]) {
struct sysinfo info;
if (sysinfo(&info) < 0) return;
avg[0] = (double) info.loads[0] / 65536.0;
avg[1] = (double) info.loads[1] / 65536.0;
avg[2] = (double) info.loads[2] / 65536.0;
}
int uv_exepath(char* buffer, size_t* size) {
ssize_t n;
if (buffer == NULL || size == NULL)
return -EINVAL;
n = readlink("/proc/self/exe", buffer, *size - 1);
if (n == -1)
return -errno;
buffer[n] = '\0';
*size = n;
return 0;
}
uint64_t uv_get_free_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
uint64_t uv_get_total_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
int uv_resident_set_memory(size_t* rss) {
char buf[1024];
const char* s;
ssize_t n;
long val;
int fd;
int i;
do
fd = open("/proc/self/stat", O_RDONLY);
while (fd == -1 && errno == EINTR);
if (fd == -1)
return -errno;
do
n = read(fd, buf, sizeof(buf) - 1);
while (n == -1 && errno == EINTR);
uv__close(fd);
if (n == -1)
return -errno;
buf[n] = '\0';
s = strchr(buf, ' ');
if (s == NULL)
goto err;
s += 1;
if (*s != '(')
goto err;
s = strchr(s, ')');
if (s == NULL)
goto err;
for (i = 1; i <= 22; i++) {
s = strchr(s + 1, ' ');
if (s == NULL)
goto err;
}
errno = 0;
val = strtol(s, NULL, 10);
if (errno != 0)
goto err;
if (val < 0)
goto err;
*rss = val * getpagesize();
return 0;
err:
return -EINVAL;
}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
struct timespec now;
int r;
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
goto retry;
}
if (r)
return -errno;
*uptime = now.tv_sec;
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int numcpus;
uv_cpu_info_t* ci;
int err;
*cpu_infos = NULL;
*count = 0;
numcpus = sysconf(_SC_NPROCESSORS_ONLN);
assert(numcpus != (unsigned int) -1);
assert(numcpus != 0);
ci = calloc(numcpus, sizeof(*ci));
if (ci == NULL)
return -ENOMEM;
err = read_models(numcpus, ci);
if (err == 0)
err = read_times(numcpus, ci);
if (err) {
uv_free_cpu_info(ci, numcpus);
return err;
}
/* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
* We don't check for errors here. Worst case, the field is left zero.
*/
if (ci[0].speed == 0)
read_speeds(numcpus, ci);
*cpu_infos = ci;
*count = numcpus;
return 0;
}
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
unsigned int num;
for (num = 0; num < numcpus; num++)
ci[num].speed = read_cpufreq(num) / 1000;
}
/* Also reads the CPU frequency on x86. The other architectures only have
* a BogoMIPS field, which may not be very accurate.
*
* Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
*/
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
char buf[1024];
char* model;
FILE* fp;
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
(void) &model_marker;
(void) &speed_marker;
(void) &speed_idx;
(void) &model;
(void) &buf;
(void) &fp;
model_idx = 0;
speed_idx = 0;
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
defined(__x86_64__)
fp = fopen("/proc/cpuinfo", "r");
if (fp == NULL)
return -errno;
while (fgets(buf, sizeof(buf), fp)) {
if (model_idx < numcpus) {
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return -ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#if defined(__arm__) || defined(__mips__)
if (model_idx < numcpus) {
#if defined(__arm__)
/* Fallback for pre-3.8 kernels. */
static const char model_marker[] = "Processor\t: ";
#else /* defined(__mips__) */
static const char model_marker[] = "cpu model\t\t: ";
#endif
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return -ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#else /* !__arm__ && !__mips__ */
if (speed_idx < numcpus) {
if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
continue;
}
}
#endif /* __arm__ || __mips__ */
}
fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
/* Now we want to make sure that all the models contain *something*, because
* it's not safe to leave them as null. Copy the last entry; if there isn't
* one, simply put "unknown" into everything.
*/
inferred_model = "unknown";
if (model_idx > 0)
inferred_model = ci[model_idx - 1].model;
while (model_idx < numcpus) {
model = strndup(inferred_model, strlen(inferred_model));
if (model == NULL)
return -ENOMEM;
ci[model_idx++].model = model;
}
return 0;
}
static int read_times(unsigned int numcpus, uv_cpu_info_t* ci) {
unsigned long clock_ticks;
struct uv_cpu_times_s ts;
unsigned long user;
unsigned long nice;
unsigned long sys;
unsigned long idle;
unsigned long dummy;
unsigned long irq;
unsigned int num;
unsigned int len;
char buf[1024];
FILE* fp;
clock_ticks = sysconf(_SC_CLK_TCK);
assert(clock_ticks != (unsigned long) -1);
assert(clock_ticks != 0);
fp = fopen("/proc/stat", "r");
if (fp == NULL)
return -errno;
if (!fgets(buf, sizeof(buf), fp))
abort();
num = 0;
while (fgets(buf, sizeof(buf), fp)) {
if (num >= numcpus)
break;
if (strncmp(buf, "cpu", 3))
break;
/* skip "cpu<num> " marker */
{
unsigned int n;
int r = sscanf(buf, "cpu%u ", &n);
assert(r == 1);
(void) r; /* silence build warning */
for (len = sizeof("cpu0"); n /= 10; len++);
}
/* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
* guest, guest_nice but we're only interested in the first four + irq.
*
* Don't use %*s to skip fields or %ll to read straight into the uint64_t
* fields, they're not allowed in C89 mode.
*/
if (6 != sscanf(buf + len,
"%lu %lu %lu %lu %lu %lu",
&user,
&nice,
&sys,
&idle,
&dummy,
&irq))
abort();
ts.user = clock_ticks * user;
ts.nice = clock_ticks * nice;
ts.sys = clock_ticks * sys;
ts.idle = clock_ticks * idle;
ts.irq = clock_ticks * irq;
ci[num++].cpu_times = ts;
}
fclose(fp);
assert(num == numcpus);
return 0;
}
static unsigned long read_cpufreq(unsigned int cpunum) {
unsigned long val;
char buf[1024];
FILE* fp;
snprintf(buf,
sizeof(buf),
"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
cpunum);
fp = fopen(buf, "r");
if (fp == NULL)
return 0;
if (fscanf(fp, "%lu", &val) != 1)
val = 0;
fclose(fp);
return val;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
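/* Illustrative sketch (not part of this file): how callers typically pair
 * uv_cpu_info() with uv_free_cpu_info(), which frees each model string and
 * then the array itself. Uses only the public uv.h API; guarded out so it
 * has no effect on the build.
 */
#if 0
#include <stdio.h>

static void print_cpus(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  if (uv_cpu_info(&cpus, &count) != 0)
    return;

  for (i = 0; i < count; i++)
    printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);

  uv_free_cpu_info(cpus, count);
}
#endif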
int uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
#ifndef HAVE_IFADDRS_H
return -ENOSYS;
#else
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_ll *sll;
if (getifaddrs(&addrs))
return -errno;
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == PF_PACKET)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses))
return -ENOMEM;
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
continue;
if (ent->ifa_addr == NULL)
continue;
/*
* On Linux getifaddrs returns information related to the raw underlying
* devices. We're not interested in this information yet.
*/
if (ent->ifa_addr->sa_family == PF_PACKET)
continue;
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != PF_PACKET)) {
continue;
}
address = *addresses;
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
sll = (struct sockaddr_ll*)ent->ifa_addr;
memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
#endif
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}
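/* Illustrative sketch (not part of this file): typical consumption of
 * uv_interface_addresses()/uv_free_interface_addresses() through the public
 * uv.h API; uv_ip4_name()/uv_ip6_name() format the stored sockaddrs.
 * Guarded out so it has no effect on the build.
 */
#if 0
#include <stdio.h>

static void list_interfaces(void) {
  uv_interface_address_t* addrs;
  char ip[64];
  int count;
  int i;

  if (uv_interface_addresses(&addrs, &count) != 0)
    return;

  for (i = 0; i < count; i++) {
    if (addrs[i].address.address4.sin_family == AF_INET)
      uv_ip4_name(&addrs[i].address.address4, ip, sizeof(ip));
    else
      uv_ip6_name(&addrs[i].address.address6, ip, sizeof(ip));
    printf("%s%s %s\n",
           addrs[i].name,
           addrs[i].is_internal ? " (internal)" : "",
           ip);
  }

  uv_free_interface_addresses(addrs, count);
}
#endif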
void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
#endif
}

@ -0,0 +1,257 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "tree.h"
#include "internal.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>
struct watcher_list {
RB_ENTRY(watcher_list) entry;
QUEUE watchers;
char* path;
int wd;
};
struct watcher_root {
struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))
static int compare_watchers(const struct watcher_list* a,
const struct watcher_list* b) {
if (a->wd < b->wd) return -1;
if (a->wd > b->wd) return 1;
return 0;
}
RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents);
static int new_inotify_fd(void) {
int err;
int fd;
fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC);
if (fd != -1)
return fd;
if (errno != ENOSYS)
return -errno;
fd = uv__inotify_init();
if (fd == -1)
return -errno;
err = uv__cloexec(fd, 1);
if (err == 0)
err = uv__nonblock(fd, 1);
if (err) {
uv__close(fd);
return err;
}
return fd;
}
static int init_inotify(uv_loop_t* loop) {
int err;
if (loop->inotify_fd != -1)
return 0;
err = new_inotify_fd();
if (err < 0)
return err;
loop->inotify_fd = err;
uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
uv__io_start(loop, &loop->inotify_read_watcher, UV__POLLIN);
return 0;
}
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
struct watcher_list w;
w.wd = wd;
return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* dummy,
unsigned int events) {
const struct uv__inotify_event* e;
struct watcher_list* w;
uv_fs_event_t* h;
QUEUE* q;
const char* path;
ssize_t size;
const char *p;
/* needs to be large enough for sizeof(inotify_event) + strlen(path) */
char buf[4096];
while (1) {
do
size = read(loop->inotify_fd, buf, sizeof(buf));
while (size == -1 && errno == EINTR);
if (size == -1) {
assert(errno == EAGAIN || errno == EWOULDBLOCK);
break;
}
assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
/* Now we have one or more inotify_event structs. */
for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
e = (const struct uv__inotify_event*)p;
events = 0;
if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
events |= UV_CHANGE;
if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
events |= UV_RENAME;
w = find_watcher(loop, e->wd);
if (w == NULL)
continue; /* Stale event, no watchers left. */
/* inotify does not return the filename when monitoring a single file
* for modifications. Repurpose the filename for API compatibility.
* I'm not convinced this is a good thing, maybe it should go.
*/
path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
QUEUE_FOREACH(q, &w->watchers) {
h = QUEUE_DATA(q, uv_fs_event_t, watchers);
h->cb(h, path, events, 0);
}
}
}
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* path,
unsigned int flags) {
struct watcher_list* w;
int events;
int err;
int wd;
if (uv__is_active(handle))
return -EINVAL;
err = init_inotify(handle->loop);
if (err)
return err;
events = UV__IN_ATTRIB
| UV__IN_CREATE
| UV__IN_MODIFY
| UV__IN_DELETE
| UV__IN_DELETE_SELF
| UV__IN_MOVE_SELF
| UV__IN_MOVED_FROM
| UV__IN_MOVED_TO;
wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events);
if (wd == -1)
return -errno;
w = find_watcher(handle->loop, wd);
if (w)
goto no_insert;
w = malloc(sizeof(*w) + strlen(path) + 1);
if (w == NULL)
return -ENOMEM;
w->wd = wd;
w->path = strcpy((char*)(w + 1), path);
QUEUE_INIT(&w->watchers);
RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
no_insert:
uv__handle_start(handle);
QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
handle->path = w->path;
handle->cb = cb;
handle->wd = wd;
return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
struct watcher_list* w;
if (!uv__is_active(handle))
return 0;
w = find_watcher(handle->loop, handle->wd);
assert(w != NULL);
handle->wd = -1;
handle->path = NULL;
uv__handle_stop(handle);
QUEUE_REMOVE(&handle->watchers);
if (QUEUE_EMPTY(&w->watchers)) {
/* No watchers left for this path. Clean up. */
RB_REMOVE(watcher_root, CAST(&handle->loop->inotify_watchers), w);
uv__inotify_rm_watch(handle->loop->inotify_fd, w->wd);
free(w);
}
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
}
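/* Illustrative sketch (not part of this file): minimal use of the inotify
 * backed uv_fs_event API defined above. The callback signature matches the
 * h->cb(h, path, events, 0) invocation in uv__inotify_read(). Guarded out
 * so it has no effect on the build.
 */
#if 0
#include <stdio.h>

static void on_fs_event(uv_fs_event_t* handle,
                        const char* filename,
                        int events,
                        int status) {
  if (status < 0)
    return;
  printf("%s:%s%s\n",
         filename != NULL ? filename : "(watched path)",
         (events & UV_RENAME) ? " rename" : "",
         (events & UV_CHANGE) ? " change" : "");
}

static int watch_path(uv_loop_t* loop, uv_fs_event_t* handle, const char* path) {
  int err;

  err = uv_fs_event_init(loop, handle);
  if (err != 0)
    return err;

  return uv_fs_event_start(handle, on_fs_event, path, 0);
}
#endif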

@ -0,0 +1,445 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "linux-syscalls.h"
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>
#if defined(__i386__)
# ifndef __NR_socketcall
# define __NR_socketcall 102
# endif
#endif
#if defined(__arm__)
# if defined(__thumb__) || defined(__ARM_EABI__)
# define UV_SYSCALL_BASE 0
# else
# define UV_SYSCALL_BASE 0x900000
# endif
#endif /* __arm__ */
#ifndef __NR_accept4
# if defined(__x86_64__)
# define __NR_accept4 288
# elif defined(__i386__)
/* Nothing. Handled through socketcall(). */
# elif defined(__arm__)
# define __NR_accept4 (UV_SYSCALL_BASE + 366)
# endif
#endif /* __NR_accept4 */
#ifndef __NR_eventfd
# if defined(__x86_64__)
# define __NR_eventfd 284
# elif defined(__i386__)
# define __NR_eventfd 323
# elif defined(__arm__)
# define __NR_eventfd (UV_SYSCALL_BASE + 351)
# endif
#endif /* __NR_eventfd */
#ifndef __NR_eventfd2
# if defined(__x86_64__)
# define __NR_eventfd2 290
# elif defined(__i386__)
# define __NR_eventfd2 328
# elif defined(__arm__)
# define __NR_eventfd2 (UV_SYSCALL_BASE + 356)
# endif
#endif /* __NR_eventfd2 */
#ifndef __NR_epoll_create
# if defined(__x86_64__)
# define __NR_epoll_create 213
# elif defined(__i386__)
# define __NR_epoll_create 254
# elif defined(__arm__)
# define __NR_epoll_create (UV_SYSCALL_BASE + 250)
# endif
#endif /* __NR_epoll_create */
#ifndef __NR_epoll_create1
# if defined(__x86_64__)
# define __NR_epoll_create1 291
# elif defined(__i386__)
# define __NR_epoll_create1 329
# elif defined(__arm__)
# define __NR_epoll_create1 (UV_SYSCALL_BASE + 357)
# endif
#endif /* __NR_epoll_create1 */
#ifndef __NR_epoll_ctl
# if defined(__x86_64__)
# define __NR_epoll_ctl 233 /* used to be 214 */
# elif defined(__i386__)
# define __NR_epoll_ctl 255
# elif defined(__arm__)
# define __NR_epoll_ctl (UV_SYSCALL_BASE + 251)
# endif
#endif /* __NR_epoll_ctl */
#ifndef __NR_epoll_wait
# if defined(__x86_64__)
# define __NR_epoll_wait 232 /* used to be 215 */
# elif defined(__i386__)
# define __NR_epoll_wait 256
# elif defined(__arm__)
# define __NR_epoll_wait (UV_SYSCALL_BASE + 252)
# endif
#endif /* __NR_epoll_wait */
#ifndef __NR_epoll_pwait
# if defined(__x86_64__)
# define __NR_epoll_pwait 281
# elif defined(__i386__)
# define __NR_epoll_pwait 319
# elif defined(__arm__)
# define __NR_epoll_pwait (UV_SYSCALL_BASE + 346)
# endif
#endif /* __NR_epoll_pwait */
#ifndef __NR_inotify_init
# if defined(__x86_64__)
# define __NR_inotify_init 253
# elif defined(__i386__)
# define __NR_inotify_init 291
# elif defined(__arm__)
# define __NR_inotify_init (UV_SYSCALL_BASE + 316)
# endif
#endif /* __NR_inotify_init */
#ifndef __NR_inotify_init1
# if defined(__x86_64__)
# define __NR_inotify_init1 294
# elif defined(__i386__)
# define __NR_inotify_init1 332
# elif defined(__arm__)
# define __NR_inotify_init1 (UV_SYSCALL_BASE + 360)
# endif
#endif /* __NR_inotify_init1 */
#ifndef __NR_inotify_add_watch
# if defined(__x86_64__)
# define __NR_inotify_add_watch 254
# elif defined(__i386__)
# define __NR_inotify_add_watch 292
# elif defined(__arm__)
# define __NR_inotify_add_watch (UV_SYSCALL_BASE + 317)
# endif
#endif /* __NR_inotify_add_watch */
#ifndef __NR_inotify_rm_watch
# if defined(__x86_64__)
# define __NR_inotify_rm_watch 255
# elif defined(__i386__)
# define __NR_inotify_rm_watch 293
# elif defined(__arm__)
# define __NR_inotify_rm_watch (UV_SYSCALL_BASE + 318)
# endif
#endif /* __NR_inotify_rm_watch */
#ifndef __NR_pipe2
# if defined(__x86_64__)
# define __NR_pipe2 293
# elif defined(__i386__)
# define __NR_pipe2 331
# elif defined(__arm__)
# define __NR_pipe2 (UV_SYSCALL_BASE + 359)
# endif
#endif /* __NR_pipe2 */
#ifndef __NR_recvmmsg
# if defined(__x86_64__)
# define __NR_recvmmsg 299
# elif defined(__i386__)
# define __NR_recvmmsg 337
# elif defined(__arm__)
# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
# endif
#endif /* __NR_recvmmsg */
#ifndef __NR_sendmmsg
# if defined(__x86_64__)
# define __NR_sendmmsg 307
# elif defined(__i386__)
# define __NR_sendmmsg 345
# elif defined(__arm__)
# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
# endif
#endif /* __NR_sendmmsg */
#ifndef __NR_utimensat
# if defined(__x86_64__)
# define __NR_utimensat 280
# elif defined(__i386__)
# define __NR_utimensat 320
# elif defined(__arm__)
# define __NR_utimensat (UV_SYSCALL_BASE + 348)
# endif
#endif /* __NR_utimensat */
#ifndef __NR_preadv
# if defined(__x86_64__)
# define __NR_preadv 295
# elif defined(__i386__)
# define __NR_preadv 333
# elif defined(__arm__)
# define __NR_preadv (UV_SYSCALL_BASE + 361)
# endif
#endif /* __NR_preadv */
#ifndef __NR_pwritev
# if defined(__x86_64__)
# define __NR_pwritev 296
# elif defined(__i386__)
# define __NR_pwritev 334
# elif defined(__arm__)
# define __NR_pwritev (UV_SYSCALL_BASE + 362)
# endif
#endif /* __NR_pwritev */
#ifndef __NR_dup3
# if defined(__x86_64__)
# define __NR_dup3 292
# elif defined(__i386__)
# define __NR_dup3 330
# elif defined(__arm__)
# define __NR_dup3 (UV_SYSCALL_BASE + 358)
# endif
#endif /* __NR_dup3 */
int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) {
#if defined(__i386__)
unsigned long args[4];
int r;
args[0] = (unsigned long) fd;
args[1] = (unsigned long) addr;
args[2] = (unsigned long) addrlen;
args[3] = (unsigned long) flags;
r = syscall(__NR_socketcall, 18 /* SYS_ACCEPT4 */, args);
/* socketcall() raises EINVAL when SYS_ACCEPT4 is not supported but so does
* a bad flags argument. Try to distinguish between the two cases.
*/
if (r == -1)
if (errno == EINVAL)
if ((flags & ~(UV__SOCK_CLOEXEC|UV__SOCK_NONBLOCK)) == 0)
errno = ENOSYS;
return r;
#elif defined(__NR_accept4)
return syscall(__NR_accept4, fd, addr, addrlen, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__eventfd(unsigned int count) {
#if defined(__NR_eventfd)
return syscall(__NR_eventfd, count);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__eventfd2(unsigned int count, int flags) {
#if defined(__NR_eventfd2)
return syscall(__NR_eventfd2, count, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_create(int size) {
#if defined(__NR_epoll_create)
return syscall(__NR_epoll_create, size);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_create1(int flags) {
#if defined(__NR_epoll_create1)
return syscall(__NR_epoll_create1, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event* events) {
#if defined(__NR_epoll_ctl)
return syscall(__NR_epoll_ctl, epfd, op, fd, events);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_wait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout) {
#if defined(__NR_epoll_wait)
return syscall(__NR_epoll_wait, epfd, events, nevents, timeout);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__epoll_pwait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout,
const sigset_t* sigmask) {
#if defined(__NR_epoll_pwait)
return syscall(__NR_epoll_pwait,
epfd,
events,
nevents,
timeout,
sigmask,
sizeof(*sigmask));
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_init(void) {
#if defined(__NR_inotify_init)
return syscall(__NR_inotify_init);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_init1(int flags) {
#if defined(__NR_inotify_init1)
return syscall(__NR_inotify_init1, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_add_watch(int fd, const char* path, uint32_t mask) {
#if defined(__NR_inotify_add_watch)
return syscall(__NR_inotify_add_watch, fd, path, mask);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__inotify_rm_watch(int fd, int32_t wd) {
#if defined(__NR_inotify_rm_watch)
return syscall(__NR_inotify_rm_watch, fd, wd);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__pipe2(int pipefd[2], int flags) {
#if defined(__NR_pipe2)
return syscall(__NR_pipe2, pipefd, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags) {
#if defined(__NR_sendmmsg)
return syscall(__NR_sendmmsg, fd, mmsg, vlen, flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout) {
#if defined(__NR_recvmmsg)
return syscall(__NR_recvmmsg, fd, mmsg, vlen, flags, timeout);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__utimesat(int dirfd,
const char* path,
const struct timespec times[2],
int flags)
{
#if defined(__NR_utimensat)
return syscall(__NR_utimensat, dirfd, path, times, flags);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset) {
#if defined(__NR_preadv)
return syscall(__NR_preadv, fd, iov, iovcnt, offset);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset) {
#if defined(__NR_pwritev)
return syscall(__NR_pwritev, fd, iov, iovcnt, offset);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__dup3(int oldfd, int newfd, int flags) {
#if defined(__NR_dup3)
return syscall(__NR_dup3, oldfd, newfd, flags);
#else
return errno = ENOSYS, -1;
#endif
}
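/* Illustrative sketch (not part of this file): the fallback idiom callers of
 * these wrappers use elsewhere in libuv - try the newer syscall first and
 * drop back to the classic one when the running kernel reports ENOSYS.
 * example_dup3_or_dup2() is hypothetical; guarded out so it has no effect on
 * the build.
 */
#if 0
static int example_dup3_or_dup2(int oldfd, int newfd) {
  int r;

  r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
  if (r != -1 || errno != ENOSYS)
    return r;

  /* Pre-dup3 kernels: dup2() works but cannot set CLOEXEC atomically. */
  return dup2(oldfd, newfd);
}
#endif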

@ -0,0 +1,154 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_
#undef _GNU_SOURCE
#define _GNU_SOURCE
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
#if defined(__alpha__)
# define UV__O_CLOEXEC 0x200000
#elif defined(__hppa__)
# define UV__O_CLOEXEC 0x200000
#elif defined(__sparc__)
# define UV__O_CLOEXEC 0x400000
#else
# define UV__O_CLOEXEC 0x80000
#endif
#if defined(__alpha__)
# define UV__O_NONBLOCK 0x4
#elif defined(__hppa__)
# define UV__O_NONBLOCK 0x10004
#elif defined(__mips__)
# define UV__O_NONBLOCK 0x80
#elif defined(__sparc__)
# define UV__O_NONBLOCK 0x4000
#else
# define UV__O_NONBLOCK 0x800
#endif
#define UV__EFD_CLOEXEC UV__O_CLOEXEC
#define UV__EFD_NONBLOCK UV__O_NONBLOCK
#define UV__IN_CLOEXEC UV__O_CLOEXEC
#define UV__IN_NONBLOCK UV__O_NONBLOCK
#define UV__SOCK_CLOEXEC UV__O_CLOEXEC
#define UV__SOCK_NONBLOCK UV__O_NONBLOCK
/* epoll flags */
#define UV__EPOLL_CLOEXEC UV__O_CLOEXEC
#define UV__EPOLL_CTL_ADD 1
#define UV__EPOLL_CTL_DEL 2
#define UV__EPOLL_CTL_MOD 3
#define UV__EPOLLIN 1
#define UV__EPOLLOUT 4
#define UV__EPOLLERR 8
#define UV__EPOLLHUP 16
#define UV__EPOLLONESHOT 0x40000000
#define UV__EPOLLET 0x80000000
/* inotify flags */
#define UV__IN_ACCESS 0x001
#define UV__IN_MODIFY 0x002
#define UV__IN_ATTRIB 0x004
#define UV__IN_CLOSE_WRITE 0x008
#define UV__IN_CLOSE_NOWRITE 0x010
#define UV__IN_OPEN 0x020
#define UV__IN_MOVED_FROM 0x040
#define UV__IN_MOVED_TO 0x080
#define UV__IN_CREATE 0x100
#define UV__IN_DELETE 0x200
#define UV__IN_DELETE_SELF 0x400
#define UV__IN_MOVE_SELF 0x800
#if defined(__x86_64__)
struct uv__epoll_event {
uint32_t events;
uint64_t data;
} __attribute__((packed));
#else
struct uv__epoll_event {
uint32_t events;
uint64_t data;
};
#endif
struct uv__inotify_event {
int32_t wd;
uint32_t mask;
uint32_t cookie;
uint32_t len;
/* char name[0]; */
};
struct uv__mmsghdr {
struct msghdr msg_hdr;
unsigned int msg_len;
};
int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags);
int uv__eventfd(unsigned int count);
int uv__epoll_create(int size);
int uv__epoll_create1(int flags);
int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event *ev);
int uv__epoll_wait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout);
int uv__epoll_pwait(int epfd,
struct uv__epoll_event* events,
int nevents,
int timeout,
const sigset_t* sigmask);
int uv__eventfd2(unsigned int count, int flags);
int uv__inotify_init(void);
int uv__inotify_init1(int flags);
int uv__inotify_add_watch(int fd, const char* path, uint32_t mask);
int uv__inotify_rm_watch(int fd, int32_t wd);
int uv__pipe2(int pipefd[2], int flags);
int uv__recvmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags,
struct timespec* timeout);
int uv__sendmmsg(int fd,
struct uv__mmsghdr* mmsg,
unsigned int vlen,
unsigned int flags);
int uv__utimesat(int dirfd,
const char* path,
const struct timespec times[2],
int flags);
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset);
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset);
int uv__dup3(int oldfd, int newfd, int flags);
#endif /* UV_LINUX_SYSCALL_H_ */

@ -0,0 +1,63 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#define UV_LOOP_WATCHER_DEFINE(name, type) \
int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \
uv__handle_init(loop, (uv_handle_t*)handle, UV_##type); \
handle->name##_cb = NULL; \
return 0; \
} \
\
int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
if (uv__is_active(handle)) return 0; \
if (cb == NULL) return -EINVAL; \
QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \
handle->name##_cb = cb; \
uv__handle_start(handle); \
return 0; \
} \
\
int uv_##name##_stop(uv_##name##_t* handle) { \
if (!uv__is_active(handle)) return 0; \
QUEUE_REMOVE(&handle->queue); \
uv__handle_stop(handle); \
return 0; \
} \
\
void uv__run_##name(uv_loop_t* loop) { \
uv_##name##_t* h; \
QUEUE* q; \
QUEUE_FOREACH(q, &loop->name##_handles) { \
h = QUEUE_DATA(q, uv_##name##_t, queue); \
h->name##_cb(h); \
} \
} \
\
void uv__##name##_close(uv_##name##_t* handle) { \
uv_##name##_stop(handle); \
}
UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
UV_LOOP_WATCHER_DEFINE(check, CHECK)
UV_LOOP_WATCHER_DEFINE(idle, IDLE)
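/* Illustrative sketch (not part of this file): the macro above expands into
 * uv_prepare_*, uv_check_* and uv_idle_* functions; an idle handle is driven
 * like this. Guarded out so it has no effect on the build.
 */
#if 0
static void on_idle(uv_idle_t* handle) {
  static int64_t counter;

  if (++counter >= 1000000)
    uv_idle_stop(handle);  /* Removing the handle lets uv_run() return. */
}

static void idle_example(void) {
  uv_idle_t idler;

  uv_idle_init(uv_default_loop(), &idler);
  uv_idle_start(&idler, on_idle);
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
#endif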

@ -0,0 +1,197 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "tree.h"
#include "internal.h"
#include "heap-inl.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static int uv__loop_init(uv_loop_t* loop, int default_loop);
static void uv__loop_close(uv_loop_t* loop);
static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;
uv_loop_t* uv_default_loop(void) {
if (default_loop_ptr != NULL)
return default_loop_ptr;
if (uv__loop_init(&default_loop_struct, /* default_loop? */ 1))
return NULL;
default_loop_ptr = &default_loop_struct;
return default_loop_ptr;
}
int uv_loop_init(uv_loop_t* loop) {
return uv__loop_init(loop, /* default_loop? */ 0);
}
int uv_loop_close(uv_loop_t* loop) {
QUEUE* q;
uv_handle_t* h;
if (!QUEUE_EMPTY(&(loop)->active_reqs))
return -EBUSY;
QUEUE_FOREACH(q, &loop->handle_queue) {
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
if (!(h->flags & UV__HANDLE_INTERNAL))
return -EBUSY;
}
uv__loop_close(loop);
#ifndef NDEBUG
memset(loop, -1, sizeof(*loop));
#endif
if (loop == default_loop_ptr)
default_loop_ptr = NULL;
return 0;
}
uv_loop_t* uv_loop_new(void) {
uv_loop_t* loop;
loop = malloc(sizeof(*loop));
if (loop == NULL)
return NULL;
if (uv_loop_init(loop)) {
free(loop);
return NULL;
}
return loop;
}
void uv_loop_delete(uv_loop_t* loop) {
uv_loop_t* default_loop;
int err;
default_loop = default_loop_ptr;
err = uv_loop_close(loop);
assert(err == 0);
if (loop != default_loop)
free(loop);
}
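/* Illustrative sketch (not part of this file): the two loop lifecycles the
 * functions above provide. A caller-owned loop pairs uv_loop_init() with
 * uv_loop_close(); uv_loop_new()/uv_loop_delete() wrap the same calls around
 * malloc()/free(). Guarded out so it has no effect on the build.
 */
#if 0
static int run_once_example(void) {
  uv_loop_t loop;
  int err;

  err = uv_loop_init(&loop);
  if (err != 0)
    return err;

  uv_run(&loop, UV_RUN_DEFAULT);  /* Returns once no active handles remain. */
  return uv_loop_close(&loop);    /* -EBUSY while handles are still open. */
}
#endif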
static int uv__loop_init(uv_loop_t* loop, int default_loop) {
unsigned int i;
int err;
uv__signal_global_once_init();
memset(loop, 0, sizeof(*loop));
heap_init((struct heap*) &loop->timer_heap);
QUEUE_INIT(&loop->wq);
QUEUE_INIT(&loop->active_reqs);
QUEUE_INIT(&loop->idle_handles);
QUEUE_INIT(&loop->async_handles);
QUEUE_INIT(&loop->check_handles);
QUEUE_INIT(&loop->prepare_handles);
QUEUE_INIT(&loop->handle_queue);
loop->nfds = 0;
loop->watchers = NULL;
loop->nwatchers = 0;
QUEUE_INIT(&loop->pending_queue);
QUEUE_INIT(&loop->watcher_queue);
loop->closing_handles = NULL;
uv__update_time(loop);
uv__async_init(&loop->async_watcher);
loop->signal_pipefd[0] = -1;
loop->signal_pipefd[1] = -1;
loop->backend_fd = -1;
loop->emfile_fd = -1;
loop->timer_counter = 0;
loop->stop_flag = 0;
err = uv__platform_loop_init(loop, default_loop);
if (err)
return err;
uv_signal_init(loop, &loop->child_watcher);
uv__handle_unref(&loop->child_watcher);
loop->child_watcher.flags |= UV__HANDLE_INTERNAL;
for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++)
QUEUE_INIT(loop->process_handles + i);
if (uv_rwlock_init(&loop->cloexec_lock))
abort();
if (uv_mutex_init(&loop->wq_mutex))
abort();
if (uv_async_init(loop, &loop->wq_async, uv__work_done))
abort();
uv__handle_unref(&loop->wq_async);
loop->wq_async.flags |= UV__HANDLE_INTERNAL;
return 0;
}
static void uv__loop_close(uv_loop_t* loop) {
uv__signal_loop_cleanup(loop);
uv__platform_loop_delete(loop);
uv__async_stop(loop, &loop->async_watcher);
if (loop->emfile_fd != -1) {
uv__close(loop->emfile_fd);
loop->emfile_fd = -1;
}
if (loop->backend_fd != -1) {
uv__close(loop->backend_fd);
loop->backend_fd = -1;
}
uv_mutex_lock(&loop->wq_mutex);
assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
assert(!uv__has_active_reqs(loop));
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
/*
* Note that all thread pool stuff is finished at this point and
* it is safe to just destroy rw lock
*/
uv_rwlock_destroy(&loop->cloexec_lock);
#if 0
assert(QUEUE_EMPTY(&loop->pending_queue));
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(loop->nfds == 0);
#endif
free(loop->watchers);
loop->watchers = NULL;
loop->nwatchers = 0;
}

@ -0,0 +1,368 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <kvm.h>
#include <paths.h>
#include <ifaddrs.h>
#include <unistd.h>
#include <time.h>
#include <stdlib.h>
#include <fcntl.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <uvm/uvm_extern.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
static char *process_title;
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return uv__kqueue_init(loop);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) == -1) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
int uv_exepath(char* buffer, size_t* size) {
int mib[4];
size_t cb;
pid_t mypid;
if (buffer == NULL || size == NULL)
return -EINVAL;
mypid = getpid();
mib[0] = CTL_KERN;
mib[1] = KERN_PROC_ARGS;
mib[2] = mypid;
mib[3] = KERN_PROC_ARGV;
cb = *size;
if (sysctl(mib, 4, buffer, &cb, NULL, 0))
return -errno;
*size = strlen(buffer);
return 0;
}
uint64_t uv_get_free_memory(void) {
struct uvmexp info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
#if defined(HW_PHYSMEM64)
uint64_t info;
int which[] = {CTL_HW, HW_PHYSMEM64};
#else
unsigned int info;
int which[] = {CTL_HW, HW_PHYSMEM};
#endif
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
return (uint64_t) info;
}
char** uv_setup_args(int argc, char** argv) {
process_title = argc ? strdup(argv[0]) : NULL;
return argv;
}
int uv_set_process_title(const char* title) {
if (process_title) free(process_title);
process_title = strdup(title);
setproctitle("%s", title);
return 0;
}
int uv_get_process_title(char* buffer, size_t size) {
if (process_title) {
strncpy(buffer, process_title, size);
} else {
if (size > 0) {
buffer[0] = '\0';
}
}
return 0;
}
int uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc2 *kinfo = NULL;
pid_t pid;
int nprocs;
int max_size = sizeof(struct kinfo_proc2);
int page_size;
page_size = getpagesize();
pid = getpid();
kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
if (kd == NULL) goto error;
kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs);
if (kinfo == NULL) goto error;
*rss = kinfo->p_vm_rssize * page_size;
kvm_close(kd);
return 0;
error:
if (kd) kvm_close(kd);
return -EPERM;
}
int uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
unsigned int multiplier = ((uint64_t)1000L / ticks);
unsigned int cur = 0;
uv_cpu_info_t* cpu_info;
u_int64_t* cp_times;
char model[512];
u_int64_t cpuspeed;
int numcpus;
size_t size;
int i;
size = sizeof(model);
if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) &&
sysctlbyname("hw.model", &model, &size, NULL, 0)) {
return -errno;
}
size = sizeof(numcpus);
if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
return -errno;
*count = numcpus;
/* Only i386 and amd64 have machdep.tsc_freq */
size = sizeof(cpuspeed);
if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0))
cpuspeed = 0;
size = numcpus * CPUSTATES * sizeof(*cp_times);
cp_times = malloc(size);
if (cp_times == NULL)
return -ENOMEM;
  if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0)) {
    SAVE_ERRNO(free(cp_times));  /* Don't leak cp_times on the error path. */
    return -errno;
  }
  *cpu_infos = malloc(numcpus * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    free(cp_times);
    return -ENOMEM;
  }
for (i = 0; i < numcpus; i++) {
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
cpu_info->model = strdup(model);
cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);
cur += CPUSTATES;
}
free(cp_times);
return 0;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_dl *sa_addr;
if (getifaddrs(&addrs))
return -errno;
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != PF_INET)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses))
return -ENOMEM;
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
continue;
if (ent->ifa_addr == NULL)
continue;
if (ent->ifa_addr->sa_family != PF_INET)
continue;
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != AF_LINK)) {
continue;
}
address = *addresses;
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}

@ -0,0 +1,382 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <errno.h>
#include <fcntl.h>
#include <kvm.h>
#include <paths.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
static char *process_title;
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
return uv__kqueue_init(loop);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
}
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_LOADAVG};
if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
avg[0] = (double) info.ldavg[0] / info.fscale;
avg[1] = (double) info.ldavg[1] / info.fscale;
avg[2] = (double) info.ldavg[2] / info.fscale;
}
int uv_exepath(char* buffer, size_t* size) {
int mib[4];
char **argsbuf = NULL;
char **argsbuf_tmp;
size_t argsbuf_size = 100U;
size_t exepath_size;
pid_t mypid;
int err;
if (buffer == NULL || size == NULL)
return -EINVAL;
mypid = getpid();
for (;;) {
err = -ENOMEM;
argsbuf_tmp = realloc(argsbuf, argsbuf_size);
if (argsbuf_tmp == NULL)
goto out;
argsbuf = argsbuf_tmp;
mib[0] = CTL_KERN;
mib[1] = KERN_PROC_ARGS;
mib[2] = mypid;
mib[3] = KERN_PROC_ARGV;
if (sysctl(mib, 4, argsbuf, &argsbuf_size, NULL, 0) == 0) {
break;
}
if (errno != ENOMEM) {
err = -errno;
goto out;
}
argsbuf_size *= 2U;
}
if (argsbuf[0] == NULL) {
err = -EINVAL; /* FIXME(bnoordhuis) More appropriate error. */
goto out;
}
exepath_size = strlen(argsbuf[0]);
if (exepath_size >= *size) {
err = -EINVAL;
goto out;
}
memcpy(buffer, argsbuf[0], exepath_size + 1U);
*size = exepath_size;
err = 0;
out:
free(argsbuf);
return err;
}
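/* Illustrative sketch (not part of this file): uv_exepath() follows the
 * usual size-in/size-out contract - *size holds the buffer capacity on entry
 * and receives the path length (excluding the NUL) on success. Guarded out
 * so it has no effect on the build.
 */
#if 0
#include <stdio.h>

static void print_exepath(void) {
  char path[4096];
  size_t size = sizeof(path);

  if (uv_exepath(path, &size) == 0)
    printf("%.*s\n", (int) size, path);
}
#endif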
uint64_t uv_get_free_memory(void) {
struct uvmexp info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
uint64_t uv_get_total_memory(void) {
uint64_t info;
int which[] = {CTL_HW, HW_PHYSMEM64};
size_t size = sizeof(info);
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
return (uint64_t) info;
}
char** uv_setup_args(int argc, char** argv) {
process_title = argc ? strdup(argv[0]) : NULL;
return argv;
}
int uv_set_process_title(const char* title) {
if (process_title) free(process_title);
process_title = strdup(title);
  setproctitle("%s", title);
return 0;
}
int uv_get_process_title(char* buffer, size_t size) {
if (process_title) {
strncpy(buffer, process_title, size);
} else {
if (size > 0) {
buffer[0] = '\0';
}
}
return 0;
}
int uv_resident_set_memory(size_t* rss) {
struct kinfo_proc kinfo;
size_t page_size = getpagesize();
size_t size = sizeof(struct kinfo_proc);
int mib[6];
mib[0] = CTL_KERN;
mib[1] = KERN_PROC;
mib[2] = KERN_PROC_PID;
mib[3] = getpid();
mib[4] = sizeof(struct kinfo_proc);
mib[5] = 1;
if (sysctl(mib, 6, &kinfo, &size, NULL, 0) < 0)
return -errno;
*rss = kinfo.p_vm_rssize * page_size;
return 0;
}
int uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, 2, &info, &size, NULL, 0))
return -errno;
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks), cpuspeed;
uint64_t info[CPUSTATES];
char model[512];
int numcpus = 1;
int which[] = {CTL_HW,HW_MODEL,0};
size_t size;
int i;
uv_cpu_info_t* cpu_info;
size = sizeof(model);
if (sysctl(which, 2, &model, &size, NULL, 0))
return -errno;
which[1] = HW_NCPU;
size = sizeof(numcpus);
if (sysctl(which, 2, &numcpus, &size, NULL, 0))
return -errno;
*cpu_infos = malloc(numcpus * sizeof(**cpu_infos));
if (!(*cpu_infos))
return -ENOMEM;
*count = numcpus;
which[1] = HW_CPUSPEED;
size = sizeof(cpuspeed);
if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) {
SAVE_ERRNO(free(*cpu_infos));
return -errno;
}
size = sizeof(info);
which[0] = CTL_KERN;
which[1] = KERN_CPTIME2;
for (i = 0; i < numcpus; i++) {
which[2] = i;
size = sizeof(info);
if (sysctl(which, 3, &info, &size, NULL, 0)) {
SAVE_ERRNO(free(*cpu_infos));
return -errno;
}
cpu_info = &(*cpu_infos)[i];
cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
cpu_info->model = strdup(model);
cpu_info->speed = cpuspeed;
}
return 0;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
int uv_interface_addresses(uv_interface_address_t** addresses,
int* count) {
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_dl *sa_addr;
if (getifaddrs(&addrs) != 0)
return -errno;
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != PF_INET)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses))
return -ENOMEM;
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
continue;
if (ent->ifa_addr == NULL)
continue;
if (ent->ifa_addr->sa_family != PF_INET)
continue;
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != AF_LINK)) {
continue;
}
address = *addresses;
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}

@ -0,0 +1,276 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdlib.h>
int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
handle->shutdown_req = NULL;
handle->connect_req = NULL;
handle->pipe_fname = NULL;
handle->ipc = ipc;
return 0;
}
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
struct sockaddr_un saddr;
const char* pipe_fname;
int sockfd;
int bound;
int err;
pipe_fname = NULL;
sockfd = -1;
bound = 0;
err = -EINVAL;
/* Already bound? */
if (uv__stream_fd(handle) >= 0)
return -EINVAL;
/* Make a copy of the file name, it outlives this function's scope. */
pipe_fname = strdup(name);
if (pipe_fname == NULL) {
err = -ENOMEM;
goto out;
}
/* We've got a copy, don't touch the original any more. */
name = NULL;
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
if (err < 0)
goto out;
sockfd = err;
memset(&saddr, 0, sizeof saddr);
strncpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path) - 1);
saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0';
saddr.sun_family = AF_UNIX;
if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
err = -errno;
/* Convert ENOENT to EACCES for compatibility with Windows. */
if (err == -ENOENT)
err = -EACCES;
goto out;
}
bound = 1;
/* Success. */
handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
handle->io_watcher.fd = sockfd;
return 0;
out:
if (bound) {
/* unlink() before uv__close() to avoid races. */
assert(pipe_fname != NULL);
unlink(pipe_fname);
}
uv__close(sockfd);
free((void*)pipe_fname);
return err;
}
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
if (uv__stream_fd(handle) == -1)
return -EINVAL;
if (listen(uv__stream_fd(handle), backlog))
return -errno;
handle->connection_cb = cb;
handle->io_watcher.cb = uv__server_io;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN);
return 0;
}
void uv__pipe_close(uv_pipe_t* handle) {
if (handle->pipe_fname) {
/*
* Unlink the file system entity before closing the file descriptor.
* Doing it the other way around introduces a race where our process
* unlinks a socket with the same name that's just been created by
* another thread or process.
*/
unlink(handle->pipe_fname);
free((void*)handle->pipe_fname);
handle->pipe_fname = NULL;
}
uv__stream_close((uv_stream_t*)handle);
}
int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
#if defined(__APPLE__)
int err;
err = uv__stream_try_select((uv_stream_t*) handle, &fd);
if (err)
return err;
#endif /* defined(__APPLE__) */
return uv__stream_open((uv_stream_t*)handle,
fd,
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
}
void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
struct sockaddr_un saddr;
int new_sock;
int err;
int r;
new_sock = (uv__stream_fd(handle) == -1);
err = -EINVAL;
if (new_sock) {
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
if (err < 0)
goto out;
handle->io_watcher.fd = err;
}
memset(&saddr, 0, sizeof saddr);
strncpy(saddr.sun_path, name, sizeof(saddr.sun_path) - 1);
saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0';
saddr.sun_family = AF_UNIX;
do {
r = connect(uv__stream_fd(handle),
(struct sockaddr*)&saddr, sizeof saddr);
}
while (r == -1 && errno == EINTR);
if (r == -1 && errno != EINPROGRESS) {
err = -errno;
goto out;
}
err = 0;
if (new_sock) {
err = uv__stream_open((uv_stream_t*)handle,
uv__stream_fd(handle),
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
}
if (err == 0)
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN | UV__POLLOUT);
out:
handle->delayed_error = err;
handle->connect_req = req;
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*)handle;
req->cb = cb;
QUEUE_INIT(&req->queue);
/* Force callback to run on next tick in case of error. */
if (err)
uv__io_feed(handle->loop, &handle->io_watcher);
/* Mimic the Windows pipe implementation, always
* return 0 and let the callback handle errors.
*/
}
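/* Illustrative sketch (not part of this file): typical callers of the
 * functions above, using only the public API. The server binds and listens
 * through the generic uv_listen(); the client uses uv_pipe_connect().
 * "/tmp/example.sock" is just an illustrative path. Guarded out so it has no
 * effect on the build.
 */
#if 0
static void on_connection(uv_stream_t* server, int status) {
  /* Accept with uv_accept() and start reading here. */
}

static void on_connect(uv_connect_t* req, int status) {
  /* status < 0 means the connection attempt failed. */
}

static int pipe_example(uv_loop_t* loop) {
  static uv_pipe_t server;
  static uv_pipe_t client;
  static uv_connect_t connect_req;
  int err;

  uv_pipe_init(loop, &server, 0);
  err = uv_pipe_bind(&server, "/tmp/example.sock");
  if (err != 0)
    return err;

  err = uv_listen((uv_stream_t*) &server, 128, on_connection);
  if (err != 0)
    return err;

  uv_pipe_init(loop, &client, 0);
  uv_pipe_connect(&connect_req, &client, "/tmp/example.sock", on_connect);

  return uv_run(loop, UV_RUN_DEFAULT);
}
#endif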
int uv_pipe_getsockname(const uv_pipe_t* handle, char* buf, size_t* len) {
struct sockaddr_un sa;
socklen_t addrlen;
int err;
addrlen = sizeof(sa);
memset(&sa, 0, addrlen);
err = getsockname(uv__stream_fd(handle), (struct sockaddr*) &sa, &addrlen);
if (err < 0) {
*len = 0;
return -errno;
}
if (sa.sun_path[0] == 0)
/* Linux abstract namespace */
addrlen -= offsetof(struct sockaddr_un, sun_path);
else
addrlen = strlen(sa.sun_path) + 1;
if (addrlen > *len) {
*len = addrlen;
return UV_ENOBUFS;
}
memcpy(buf, sa.sun_path, addrlen);
*len = addrlen;
return 0;
}
void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
}
int uv_pipe_pending_count(uv_pipe_t* handle) {
uv__stream_queued_fds_t* queued_fds;
if (!handle->ipc)
return 0;
if (handle->accepted_fd == -1)
return 0;
if (handle->queued_fds == NULL)
return 1;
queued_fds = handle->queued_fds;
return queued_fds->offset + 1;
}
uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
if (!handle->ipc)
return UV_UNKNOWN_HANDLE;
if (handle->accepted_fd == -1)
return UV_UNKNOWN_HANDLE;
else
return uv__handle_type(handle->accepted_fd);
}

@ -0,0 +1,107 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <unistd.h>
#include <assert.h>
#include <errno.h>
static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_poll_t* handle;
int pevents;
handle = container_of(w, uv_poll_t, io_watcher);
if (events & UV__POLLERR) {
uv__io_stop(loop, w, UV__POLLIN | UV__POLLOUT);
uv__handle_stop(handle);
handle->poll_cb(handle, -EBADF, 0);
return;
}
pevents = 0;
if (events & UV__POLLIN)
pevents |= UV_READABLE;
if (events & UV__POLLOUT)
pevents |= UV_WRITABLE;
handle->poll_cb(handle, 0, pevents);
}
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
uv__io_init(&handle->io_watcher, uv__poll_io, fd);
handle->poll_cb = NULL;
return 0;
}
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
uv_os_sock_t socket) {
return uv_poll_init(loop, handle, socket);
}
static void uv__poll_stop(uv_poll_t* handle) {
uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLIN | UV__POLLOUT);
uv__handle_stop(handle);
}
int uv_poll_stop(uv_poll_t* handle) {
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
uv__poll_stop(handle);
return 0;
}
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
int events;
assert((pevents & ~(UV_READABLE | UV_WRITABLE)) == 0);
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
uv__poll_stop(handle);
if (pevents == 0)
return 0;
events = 0;
if (pevents & UV_READABLE)
events |= UV__POLLIN;
if (pevents & UV_WRITABLE)
events |= UV__POLLOUT;
uv__io_start(handle->loop, &handle->io_watcher, events);
uv__handle_start(handle);
handle->poll_cb = poll_cb;
return 0;
}
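/* Illustrative sketch (not part of this file): watching an existing,
 * caller-owned file descriptor with the poll API above. libuv only polls the
 * descriptor; it never reads from it or closes it. Guarded out so it has no
 * effect on the build.
 */
#if 0
static void on_poll(uv_poll_t* handle, int status, int events) {
  if (status < 0)
    return;  /* e.g. -EBADF delivered via the UV__POLLERR path above. */

  if (events & UV_READABLE) {
    /* Drain the descriptor here. */
  }
}

static int watch_fd(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  int err;

  err = uv_poll_init(loop, handle, fd);
  if (err != 0)
    return err;

  return uv_poll_start(handle, UV_READABLE, on_poll);
}
#endif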
void uv__poll_close(uv_poll_t* handle) {
uv__poll_stop(handle);
}

@ -0,0 +1,532 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# define environ (*_NSGetEnviron())
#else
extern char **environ;
#endif
#ifdef __linux__
# include <grp.h>
#endif
static QUEUE* uv__process_queue(uv_loop_t* loop, int pid) {
assert(pid > 0);
return loop->process_handles + pid % ARRAY_SIZE(loop->process_handles);
}
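/* SIGCHLD callback (runs on the loop thread via the loop's signal watcher):
* reaps exited children with waitpid(WNOHANG), records their status and
* invokes the exit callbacks of the matching process handles.
*/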
static void uv__chld(uv_signal_t* handle, int signum) {
uv_process_t* process;
uv_loop_t* loop;
int exit_status;
int term_signal;
unsigned int i;
int status;
pid_t pid;
QUEUE pending;
QUEUE* h;
QUEUE* q;
assert(signum == SIGCHLD);
QUEUE_INIT(&pending);
loop = handle->loop;
for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++) {
h = loop->process_handles + i;
q = QUEUE_HEAD(h);
while (q != h) {
process = QUEUE_DATA(q, uv_process_t, queue);
q = QUEUE_NEXT(q);
do
pid = waitpid(process->pid, &status, WNOHANG);
while (pid == -1 && errno == EINTR);
if (pid == 0)
continue;
if (pid == -1) {
if (errno != ECHILD)
abort();
continue;
}
process->status = status;
QUEUE_REMOVE(&process->queue);
QUEUE_INSERT_TAIL(&pending, &process->queue);
}
while (!QUEUE_EMPTY(&pending)) {
q = QUEUE_HEAD(&pending);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
process = QUEUE_DATA(q, uv_process_t, queue);
uv__handle_stop(process);
if (process->exit_cb == NULL)
continue;
exit_status = 0;
if (WIFEXITED(process->status))
exit_status = WEXITSTATUS(process->status);
term_signal = 0;
if (WIFSIGNALED(process->status))
term_signal = WTERMSIG(process->status);
process->exit_cb(process, exit_status, term_signal);
}
}
}
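/* Create an AF_UNIX socketpair. SOCK_CLOEXEC is used when the kernel
* supports it; otherwise the descriptors are marked close-on-exec (and
* optionally non-blocking) after the fact.
*/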
int uv__make_socketpair(int fds[2], int flags) {
#if defined(__linux__)
static int no_cloexec;
if (no_cloexec)
goto skip;
if (socketpair(AF_UNIX, SOCK_STREAM | UV__SOCK_CLOEXEC | flags, 0, fds) == 0)
return 0;
/* Retry on EINVAL, it means SOCK_CLOEXEC is not supported.
* Anything else is a genuine error.
*/
if (errno != EINVAL)
return -errno;
no_cloexec = 1;
skip:
#endif
if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
return -errno;
uv__cloexec(fds[0], 1);
uv__cloexec(fds[1], 1);
if (flags & UV__F_NONBLOCK) {
uv__nonblock(fds[0], 1);
uv__nonblock(fds[1], 1);
}
return 0;
}
int uv__make_pipe(int fds[2], int flags) {
#if defined(__linux__)
static int no_pipe2;
if (no_pipe2)
goto skip;
if (uv__pipe2(fds, flags | UV__O_CLOEXEC) == 0)
return 0;
if (errno != ENOSYS)
return -errno;
no_pipe2 = 1;
skip:
#endif
if (pipe(fds))
return -errno;
uv__cloexec(fds[0], 1);
uv__cloexec(fds[1], 1);
if (flags & UV__F_NONBLOCK) {
uv__nonblock(fds[0], 1);
uv__nonblock(fds[1], 1);
}
return 0;
}
/*
* Used for initializing the stdio streams in options->stdio. Returns
* zero on success. See also the cleanup section in uv_spawn().
*/
static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
int mask;
int fd;
mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM;
switch (container->flags & mask) {
case UV_IGNORE:
return 0;
case UV_CREATE_PIPE:
assert(container->data.stream != NULL);
if (container->data.stream->type != UV_NAMED_PIPE)
return -EINVAL;
else
return uv__make_socketpair(fds, 0);
case UV_INHERIT_FD:
case UV_INHERIT_STREAM:
if (container->flags & UV_INHERIT_FD)
fd = container->data.fd;
else
fd = uv__stream_fd(container->data.stream);
if (fd == -1)
return -EINVAL;
fds[1] = fd;
return 0;
default:
assert(0 && "Unexpected flags");
return -EINVAL;
}
}
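/* Parent side of a UV_CREATE_PIPE stdio slot: close the child's end of
* the socketpair and open the parent's end as a non-blocking uv stream.
*/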
static int uv__process_open_stream(uv_stdio_container_t* container,
int pipefds[2],
int writable) {
int flags;
if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0)
return 0;
if (uv__close(pipefds[1]))
if (errno != EINTR && errno != EINPROGRESS)
abort();
pipefds[1] = -1;
uv__nonblock(pipefds[0], 1);
if (container->data.stream->type == UV_NAMED_PIPE &&
((uv_pipe_t*)container->data.stream)->ipc)
flags = UV_STREAM_READABLE | UV_STREAM_WRITABLE;
else if (writable)
flags = UV_STREAM_WRITABLE;
else
flags = UV_STREAM_READABLE;
return uv__stream_open(container->data.stream, pipefds[0], flags);
}
static void uv__process_close_stream(uv_stdio_container_t* container) {
if (!(container->flags & UV_CREATE_PIPE)) return;
uv__stream_close((uv_stream_t*)container->data.stream);
}
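/* Report an error code from the child to the parent over the error pipe.
* EPIPE means the parent has already gone away, which is fine; any other
* short write is a bug.
*/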
static void uv__write_int(int fd, int val) {
ssize_t n;
do
n = write(fd, &val, sizeof(val));
while (n == -1 && errno == EINTR);
if (n == -1 && errno == EPIPE)
return; /* parent process has quit */
assert(n == sizeof(val));
}
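/* Runs in the child after fork(): set up the stdio descriptors, chdir to
* options->cwd, drop privileges if requested, then execvp(). On any
* failure, -errno is written to error_fd and the child exits with 127.
*/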
static void uv__process_child_init(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
int error_fd) {
int close_fd;
int use_fd;
int fd;
if (options->flags & UV_PROCESS_DETACHED)
setsid();
for (fd = 0; fd < stdio_count; fd++) {
close_fd = pipes[fd][0];
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set
*/
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
if (use_fd == -1) {
uv__write_int(error_fd, -errno);
_exit(127);
}
}
}
if (fd == use_fd)
uv__cloexec(use_fd, 0);
else
dup2(use_fd, fd);
if (fd <= 2)
uv__nonblock(fd, 0);
if (close_fd >= stdio_count)
uv__close(close_fd);
}
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd >= 0 && fd != use_fd)
close(use_fd);
}
if (options->cwd != NULL && chdir(options->cwd)) {
uv__write_int(error_fd, -errno);
_exit(127);
}
if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
/* When dropping privileges from root, the `setgroups` call will
* remove any extraneous groups. If we don't call this, then
* even though our uid has dropped, we may still have groups
* that enable us to do super-user things. This will fail if we
* aren't root, so don't bother checking the return value; this is
* just an optimistic attempt at dropping privileges.
*/
SAVE_ERRNO(setgroups(0, NULL));
}
if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) {
uv__write_int(error_fd, -errno);
_exit(127);
}
if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) {
uv__write_int(error_fd, -errno);
_exit(127);
}
if (options->env != NULL) {
environ = options->env;
}
execvp(options->file, options->args);
uv__write_int(error_fd, -errno);
_exit(127);
}
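/* Fork and exec options->file. Stdio is wired up through socketpairs or
* inherited fds; exec failures in the child are reported back through a
* close-on-exec pipe so the parent can return the error synchronously.
*/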
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
const uv_process_options_t* options) {
int signal_pipe[2] = { -1, -1 };
int (*pipes)[2];
int stdio_count;
QUEUE* q;
ssize_t r;
pid_t pid;
int err;
int exec_errorno;
int i;
assert(options->file != NULL);
assert(!(options->flags & ~(UV_PROCESS_DETACHED |
UV_PROCESS_SETGID |
UV_PROCESS_SETUID |
UV_PROCESS_WINDOWS_HIDE |
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
QUEUE_INIT(&process->queue);
stdio_count = options->stdio_count;
if (stdio_count < 3)
stdio_count = 3;
err = -ENOMEM;
pipes = malloc(stdio_count * sizeof(*pipes));
if (pipes == NULL)
goto error;
for (i = 0; i < stdio_count; i++) {
pipes[i][0] = -1;
pipes[i][1] = -1;
}
for (i = 0; i < options->stdio_count; i++) {
err = uv__process_init_stdio(options->stdio + i, pipes[i]);
if (err)
goto error;
}
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
*
* if ((pid = fork()) > 0) {
* kill(pid, SIGTERM);
* }
* else if (pid == 0) {
* execve("/bin/cat", argp, envp);
* }
*
* The parent sends a signal immediately after forking.
* Since the child may not have called `execve()` yet,
* there is no telling which process receives the signal:
* our fork or /bin/cat.
*
* To avoid ambiguity, we create a pipe with both ends
* marked close-on-exec. Then, after the call to `fork()`,
* the parent polls the read end until it EOFs or errors with EPIPE.
*/
err = uv__make_pipe(signal_pipe, 0);
if (err)
goto error;
uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
/* Acquire write lock to prevent opening new fds in worker threads */
uv_rwlock_wrlock(&loop->cloexec_lock);
pid = fork();
if (pid == -1) {
err = -errno;
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[0]);
uv__close(signal_pipe[1]);
goto error;
}
if (pid == 0) {
uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
abort();
}
/* Release lock in parent process */
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[1]);
process->status = 0;
exec_errorno = 0;
do
r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
while (r == -1 && errno == EINTR);
if (r == 0)
; /* okay, EOF */
else if (r == sizeof(exec_errorno))
; /* okay, read errorno */
else if (r == -1 && errno == EPIPE)
; /* okay, got EPIPE */
else
abort();
uv__close(signal_pipe[0]);
for (i = 0; i < options->stdio_count; i++) {
err = uv__process_open_stream(options->stdio + i, pipes[i], i == 0);
if (err == 0)
continue;
while (i--)
uv__process_close_stream(options->stdio + i);
goto error;
}
/* Only activate this handle if exec() happened successfully */
if (exec_errorno == 0) {
q = uv__process_queue(loop, pid);
QUEUE_INSERT_TAIL(q, &process->queue);
uv__handle_start(process);
}
process->pid = pid;
process->exit_cb = options->exit_cb;
free(pipes);
return exec_errorno;
error:
if (pipes != NULL) {
for (i = 0; i < stdio_count; i++) {
if (i < options->stdio_count)
if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM))
continue;
if (pipes[i][0] != -1)
close(pipes[i][0]);
if (pipes[i][1] != -1)
close(pipes[i][1]);
}
free(pipes);
}
return err;
}
int uv_process_kill(uv_process_t* process, int signum) {
return uv_kill(process->pid, signum);
}
int uv_kill(int pid, int signum) {
if (kill(pid, signum))
return -errno;
else
return 0;
}
void uv__process_close(uv_process_t* handle) {
/* TODO stop signal watcher when this is the last handle */
QUEUE_REMOVE(&handle->queue);
uv__handle_stop(handle);
}


@ -0,0 +1,102 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
extern void uv__set_process_title(const char* title);
static void* args_mem;
static struct {
char* str;
size_t len;
} process_title;
char** uv_setup_args(int argc, char** argv) {
char** new_argv;
size_t size;
char* s;
int i;
if (argc <= 0)
return argv;
/* Calculate how much memory we need for the argv strings. */
size = 0;
for (i = 0; i < argc; i++)
size += strlen(argv[i]) + 1;
process_title.str = argv[0];
process_title.len = argv[argc - 1] + strlen(argv[argc - 1]) - argv[0];
assert(process_title.len + 1 == size); /* argv memory should be adjacent. */
/* Add space for the argv pointers. */
size += (argc + 1) * sizeof(char*);
new_argv = malloc(size);
if (new_argv == NULL)
return argv;
args_mem = new_argv;
/* Copy over the strings and set up the pointer table. */
s = (char*) &new_argv[argc + 1];
for (i = 0; i < argc; i++) {
size = strlen(argv[i]) + 1;
memcpy(s, argv[i], size);
new_argv[i] = s;
s += size;
}
new_argv[i] = NULL;
return new_argv;
}
int uv_set_process_title(const char* title) {
if (process_title.len == 0)
return 0;
/* No need to terminate, byte after is always '\0'. */
strncpy(process_title.str, title, process_title.len);
uv__set_process_title(title);
return 0;
}
int uv_get_process_title(char* buffer, size_t size) {
if (process_title.len > 0)
strncpy(buffer, process_title.str, size);
else if (size > 0)
buffer[0] = '\0';
return 0;
}
UV_DESTRUCTOR(static void free_args_mem(void)) {
free(args_mem); /* Keep valgrind happy. */
args_mem = NULL;
}


@ -0,0 +1,103 @@
/* Copyright (c) 2013, Sony Mobile Communications AB
* Copyright (c) 2012, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Android versions < 4.1 have a broken pthread_sigmask.
* Note that this block of code must come before any inclusion of
* pthread-fixes.h so that the real pthread_sigmask can be referenced.
*/
#include <errno.h>
#include <pthread.h>
int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
static int workaround;
if (workaround) {
return sigprocmask(how, set, oset);
} else if (pthread_sigmask(how, set, oset)) {
if (errno == EINVAL && sigprocmask(how, set, oset) == 0) {
workaround = 1;
return 0;
} else {
return -1;
}
} else {
return 0;
}
}
/* Android doesn't provide pthread_barrier_t for now. */
#ifndef PTHREAD_BARRIER_SERIAL_THREAD
#include "pthread-fixes.h"
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count) {
barrier->count = count;
pthread_mutex_init(&barrier->mutex, NULL);
pthread_cond_init(&barrier->cond, NULL);
return 0;
}
int pthread_barrier_wait(pthread_barrier_t* barrier) {
/* Lock the mutex. */
pthread_mutex_lock(&barrier->mutex);
/* Decrement the count. If this thread is the last to arrive (the count
reaches 0), wake up the waiters, unlock the mutex, and return
PTHREAD_BARRIER_SERIAL_THREAD. */
if (--barrier->count == 0) {
/* Last thread to reach the barrier. */
pthread_cond_broadcast(&barrier->cond);
pthread_mutex_unlock(&barrier->mutex);
return PTHREAD_BARRIER_SERIAL_THREAD;
}
/* Otherwise, wait until the last thread arrives and the count reaches 0,
then return 0 to indicate this is not the serial thread. */
do {
pthread_cond_wait(&barrier->cond, &barrier->mutex);
} while (barrier->count > 0);
pthread_mutex_unlock(&barrier->mutex);
return 0;
}
int pthread_barrier_destroy(pthread_barrier_t *barrier) {
barrier->count = 0;
pthread_cond_destroy(&barrier->cond);
pthread_mutex_destroy(&barrier->mutex);
return 0;
}
#endif /* defined(PTHREAD_BARRIER_SERIAL_THREAD) */
int pthread_yield(void) {
sched_yield();
return 0;
}


@ -0,0 +1,465 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
typedef struct {
uv_signal_t* handle;
int signum;
} uv__signal_msg_t;
RB_HEAD(uv__signal_tree_s, uv_signal_s);
static int uv__signal_unlock(void);
static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events);
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2);
static void uv__signal_stop(uv_signal_t* handle);
static pthread_once_t uv__signal_global_init_guard = PTHREAD_ONCE_INIT;
static struct uv__signal_tree_s uv__signal_tree =
RB_INITIALIZER(uv__signal_tree);
static int uv__signal_lock_pipefd[2];
RB_GENERATE_STATIC(uv__signal_tree_s,
uv_signal_s, tree_entry,
uv__signal_compare)
static void uv__signal_global_init(void) {
if (uv__make_pipe(uv__signal_lock_pipefd, 0))
abort();
if (uv__signal_unlock())
abort();
}
void uv__signal_global_once_init(void) {
pthread_once(&uv__signal_global_init_guard, uv__signal_global_init);
}
static int uv__signal_lock(void) {
int r;
char data;
do {
r = read(uv__signal_lock_pipefd[0], &data, sizeof data);
} while (r < 0 && errno == EINTR);
return (r < 0) ? -1 : 0;
}
static int uv__signal_unlock(void) {
int r;
char data = 42;
do {
r = write(uv__signal_lock_pipefd[1], &data, sizeof data);
} while (r < 0 && errno == EINTR);
return (r < 0) ? -1 : 0;
}
static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
sigset_t new_mask;
if (sigfillset(&new_mask))
abort();
if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
abort();
if (uv__signal_lock())
abort();
}
static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
if (uv__signal_unlock())
abort();
if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL))
abort();
}
static uv_signal_t* uv__signal_first_handle(int signum) {
/* This function must be called with the signal lock held. */
uv_signal_t lookup;
uv_signal_t* handle;
lookup.signum = signum;
lookup.loop = NULL;
handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup);
if (handle != NULL && handle->signum == signum)
return handle;
return NULL;
}
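/* Generic signal handler. It runs in signal context, so all it does is
* write a small uv__signal_msg_t into the signal pipe of every loop that
* has a watcher for this signum; uv__signal_event dispatches it later on
* the loop thread.
*/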
static void uv__signal_handler(int signum) {
uv__signal_msg_t msg;
uv_signal_t* handle;
int saved_errno;
saved_errno = errno;
memset(&msg, 0, sizeof msg);
if (uv__signal_lock()) {
errno = saved_errno;
return;
}
for (handle = uv__signal_first_handle(signum);
handle != NULL && handle->signum == signum;
handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) {
int r;
msg.signum = signum;
msg.handle = handle;
/* write() should be atomic for small data chunks, so the entire message
* should be written at once. In theory the pipe could become full, in
* which case the user is out of luck.
*/
do {
r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg);
} while (r == -1 && errno == EINTR);
assert(r == sizeof msg ||
(r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
if (r != -1)
handle->caught_signals++;
}
uv__signal_unlock();
errno = saved_errno;
}
static int uv__signal_register_handler(int signum) {
/* When this function is called, the signal lock must be held. */
struct sigaction sa;
/* XXX use a separate signal stack? */
memset(&sa, 0, sizeof(sa));
if (sigfillset(&sa.sa_mask))
abort();
sa.sa_handler = uv__signal_handler;
/* XXX save old action so we can restore it later on? */
if (sigaction(signum, &sa, NULL))
return -errno;
return 0;
}
static void uv__signal_unregister_handler(int signum) {
/* When this function is called, the signal lock must be held. */
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
/* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
* signal implies that it was successfully registered earlier, so EINVAL
* should never happen.
*/
if (sigaction(signum, &sa, NULL))
abort();
}
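/* Lazily create the per-loop signal pipe and start watching its read end. */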
static int uv__signal_loop_once_init(uv_loop_t* loop) {
int err;
/* Return if already initialized. */
if (loop->signal_pipefd[0] != -1)
return 0;
err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
if (err)
return err;
uv__io_init(&loop->signal_io_watcher,
uv__signal_event,
loop->signal_pipefd[0]);
uv__io_start(loop, &loop->signal_io_watcher, UV__POLLIN);
return 0;
}
void uv__signal_loop_cleanup(uv_loop_t* loop) {
QUEUE* q;
/* Stop all the signal watchers that are still attached to this loop. This
* ensures that the (shared) signal tree doesn't contain any invalid
* entries, and that signal handlers are removed when appropriate.
*/
QUEUE_FOREACH(q, &loop->handle_queue) {
uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
if (handle->type == UV_SIGNAL)
uv__signal_stop((uv_signal_t*) handle);
}
if (loop->signal_pipefd[0] != -1) {
uv__close(loop->signal_pipefd[0]);
loop->signal_pipefd[0] = -1;
}
if (loop->signal_pipefd[1] != -1) {
uv__close(loop->signal_pipefd[1]);
loop->signal_pipefd[1] = -1;
}
}
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
int err;
err = uv__signal_loop_once_init(loop);
if (err)
return err;
uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
handle->signum = 0;
handle->caught_signals = 0;
handle->dispatched_signals = 0;
return 0;
}
void uv__signal_close(uv_signal_t* handle) {
uv__signal_stop(handle);
/* If there are any caught signals "trapped" in the signal pipe, we can't
* call the close callback yet. Otherwise, add the handle to the finish_close
* queue.
*/
if (handle->caught_signals == handle->dispatched_signals) {
uv__make_close_pending((uv_handle_t*) handle);
}
}
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
sigset_t saved_sigmask;
int err;
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
/* If the user supplies signum == 0, return an error right away. If the
* signum is otherwise invalid, uv__signal_register will find out
* eventually.
*/
if (signum == 0)
return -EINVAL;
/* Short circuit: if the signal watcher is already watching {signum} don't
* go through the process of deregistering and registering the handler.
* Additionally, this avoids pending signals getting lost in the small
* time frame in which handle->signum == 0.
*/
if (signum == handle->signum) {
handle->signal_cb = signal_cb;
return 0;
}
/* If the signal handler was already active, stop it first. */
if (handle->signum != 0) {
uv__signal_stop(handle);
}
uv__signal_block_and_lock(&saved_sigmask);
/* If at this point there are no active signal watchers for this signum (in
* any of the loops), it's time to try and register a handler for it here.
*/
if (uv__signal_first_handle(signum) == NULL) {
err = uv__signal_register_handler(signum);
if (err) {
/* Registering the signal handler failed. Must be an invalid signal. */
uv__signal_unlock_and_unblock(&saved_sigmask);
return err;
}
}
handle->signum = signum;
RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
uv__signal_unlock_and_unblock(&saved_sigmask);
handle->signal_cb = signal_cb;
uv__handle_start(handle);
return 0;
}
static void uv__signal_event(uv_loop_t* loop,
uv__io_t* w,
unsigned int events) {
uv__signal_msg_t* msg;
uv_signal_t* handle;
char buf[sizeof(uv__signal_msg_t) * 32];
size_t bytes, end, i;
int r;
bytes = 0;
end = 0;
do {
r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
if (r == -1 && errno == EINTR)
continue;
if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
/* If there are bytes in the buffer already (which is extremely unlikely,
* if possible at all) we can't exit the function here. We'll
* spin until more bytes are read instead.
*/
if (bytes > 0)
continue;
/* Otherwise, there was nothing there. */
return;
}
/* Other errors really should never happen. */
if (r == -1)
abort();
bytes += r;
/* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
msg = (uv__signal_msg_t*) (buf + i);
handle = msg->handle;
if (msg->signum == handle->signum) {
assert(!(handle->flags & UV_CLOSING));
handle->signal_cb(handle, handle->signum);
}
handle->dispatched_signals++;
/* If uv_close was called while there were caught signals that were not
* yet dispatched, the uv__finish_close was deferred. Make close pending
* now if this has happened.
*/
if ((handle->flags & UV_CLOSING) &&
(handle->caught_signals == handle->dispatched_signals)) {
uv__make_close_pending((uv_handle_t*) handle);
}
}
bytes -= end;
/* If there are any "partial" messages left, move them to the start of
* the buffer, and spin. This should not happen.
*/
if (bytes) {
memmove(buf, buf + end, bytes);
continue;
}
} while (end == sizeof buf);
}
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
/* Compare signums first so all watchers with the same signum end up
* adjacent.
*/
if (w1->signum < w2->signum) return -1;
if (w1->signum > w2->signum) return 1;
/* Sort by loop pointer, so we can easily look up the first item after
* { .signum = x, .loop = NULL }.
*/
if (w1->loop < w2->loop) return -1;
if (w1->loop > w2->loop) return 1;
if (w1 < w2) return -1;
if (w1 > w2) return 1;
return 0;
}
int uv_signal_stop(uv_signal_t* handle) {
assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
uv__signal_stop(handle);
return 0;
}
static void uv__signal_stop(uv_signal_t* handle) {
uv_signal_t* removed_handle;
sigset_t saved_sigmask;
/* If the watcher wasn't started, this is a no-op. */
if (handle->signum == 0)
return;
uv__signal_block_and_lock(&saved_sigmask);
removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle);
assert(removed_handle == handle);
(void) removed_handle;
/* Check if there are other active signal watchers observing this signal. If
* not, unregister the signal handler.
*/
if (uv__signal_first_handle(handle->signum) == NULL)
uv__signal_unregister_handler(handle->signum);
uv__signal_unlock_and_unblock(&saved_sigmask);
handle->signum = 0;
uv__handle_stop(handle);
}


@ -0,0 +1,53 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SPINLOCK_H_
#define UV_SPINLOCK_H_
#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */
#include "atomic-ops.h"
#define UV_SPINLOCK_INITIALIZER { 0 }
typedef struct {
int lock;
} uv_spinlock_t;
UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
ACCESS_ONCE(int, spinlock->lock) = 0;
}
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
while (!uv_spinlock_trylock(spinlock)) cpu_relax();
}
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
ACCESS_ONCE(int, spinlock->lock) = 0;
}
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
/* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
* Not really critical until we have locks that are (frequently) contended
* for by several threads.
*/
return 0 == cmpxchgi(&spinlock->lock, 0, 1);
}
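/* Usage sketch for the API declared above:
*
*   static uv_spinlock_t lock = UV_SPINLOCK_INITIALIZER;
*
*   uv_spinlock_lock(&lock);
*   ... short critical section ...
*   uv_spinlock_unlock(&lock);
*/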
#endif /* UV_SPINLOCK_H_ */

File diff suppressed because it is too large


@ -0,0 +1,740 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifndef SUNOS_NO_IFADDRS
# include <ifaddrs.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <sys/loadavg.h>
#include <sys/time.h>
#include <unistd.h>
#include <kstat.h>
#include <fcntl.h>
#include <sys/port.h>
#include <port.h>
#define PORT_FIRED 0x69
#define PORT_UNUSED 0x0
#define PORT_LOADED 0x99
#define PORT_DELETED -1
#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
#define PROCFS_FILE_OFFSET_BITS_HACK 1
#undef _FILE_OFFSET_BITS
#else
#define PROCFS_FILE_OFFSET_BITS_HACK 0
#endif
#include <procfs.h>
#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
#define _FILE_OFFSET_BITS 64
#endif
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
int err;
int fd;
loop->fs_fd = -1;
loop->backend_fd = -1;
fd = port_create();
if (fd == -1)
return -errno;
err = uv__cloexec(fd, 1);
if (err) {
uv__close(fd);
return err;
}
loop->backend_fd = fd;
return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->fs_fd != -1) {
uv__close(loop->fs_fd);
loop->fs_fd = -1;
}
if (loop->backend_fd != -1) {
uv__close(loop->backend_fd);
loop->backend_fd = -1;
}
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct port_event* events;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
events = (struct port_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events == NULL)
return;
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if ((int) events[i].portev_object == fd)
events[i].portev_object = -1;
}
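/* Poll for I/O using Solaris event ports. Event ports are one-shot, so
* every watcher that fired and still has pending events is queued again
* and re-associated on the next iteration. The fired events array is
* exposed through loop->watchers so uv__platform_invalidate_fd() can drop
* events for fds that were closed by a callback.
*/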
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct port_event events[1024];
struct port_event* pe;
struct timespec spec;
QUEUE* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
unsigned int nfds;
unsigned int i;
int saved_errno;
int nevents;
int count;
int fd;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0))
abort();
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
for (;;) {
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
}
/* Work around a kernel bug where nfds is not updated. */
events[0].portev_source = 0;
nfds = 1;
saved_errno = 0;
if (port_getn(loop->backend_fd,
events,
ARRAY_SIZE(events),
&nfds,
timeout == -1 ? NULL : &spec)) {
/* Work around another kernel bug: port_getn() may return events even
* on error.
*/
if (errno == EINTR || errno == ETIME)
saved_errno = errno;
else
abort();
}
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (events[0].portev_source == 0) {
if (timeout == 0)
return;
if (timeout == -1)
continue;
goto update_timeout;
}
if (nfds == 0) {
assert(timeout != -1);
return;
}
nevents = 0;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = (void*) events;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->portev_object;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
/* File descriptor that we've stopped watching, ignore. */
if (w == NULL)
continue;
w->cb(loop, w, pe->portev_events);
nevents++;
if (w != loop->watchers[fd])
continue; /* Disabled by callback. */
/* Event ports operate in oneshot mode; rearm the watcher on the next run. */
if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (saved_errno == ETIME) {
assert(timeout != -1);
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
return;
timeout -= diff;
}
}
uint64_t uv__hrtime(uv_clocktype_t type) {
return gethrtime();
}
/*
* We could use a static buffer for the path manipulations that we need outside
* of the function, but this function could be called by multiple consumers and
* we don't want to potentially create a race condition in the use of snprintf.
*/
int uv_exepath(char* buffer, size_t* size) {
ssize_t res;
char buf[128];
if (buffer == NULL || size == NULL)
return -EINVAL;
snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());
res = readlink(buf, buffer, *size - 1);
if (res == -1)
return -errno;
buffer[res] = '\0';
*size = res;
return 0;
}
uint64_t uv_get_free_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
uint64_t uv_get_total_memory(void) {
return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
void uv_loadavg(double avg[3]) {
(void) getloadavg(avg, 3);
}
#if defined(PORT_SOURCE_FILE)
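/* (Re-)associate the watched file object with the event port. File watches
* are one-shot, so this runs again after every delivered event.
*/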
static int uv__fs_event_rearm(uv_fs_event_t *handle) {
if (handle->fd == -1)
return -EBADF;
if (port_associate(handle->loop->fs_fd,
PORT_SOURCE_FILE,
(uintptr_t) &handle->fo,
FILE_ATTRIB | FILE_MODIFIED,
handle) == -1) {
return -errno;
}
handle->fd = PORT_LOADED;
return 0;
}
static void uv__fs_event_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents) {
uv_fs_event_t *handle = NULL;
timespec_t timeout;
port_event_t pe;
int events;
int r;
(void) w;
(void) revents;
do {
uint_t n = 1;
/*
* Note that our use of port_getn() here (and not port_get()) is deliberate:
* there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
* causes port_get() to return success instead of ETIME when there aren't
* actually any events (!); by using port_getn() in lieu of port_get(),
* we can at least work around the bug by checking for zero returned events
* and treating it as we would ETIME.
*/
do {
memset(&timeout, 0, sizeof timeout);
r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
}
while (r == -1 && errno == EINTR);
if ((r == -1 && errno == ETIME) || n == 0)
break;
handle = (uv_fs_event_t*) pe.portev_user;
assert((r == 0) && "unexpected port_get() error");
events = 0;
if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
events |= UV_CHANGE;
if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
events |= UV_RENAME;
assert(events != 0);
handle->fd = PORT_FIRED;
handle->cb(handle, NULL, events, 0);
if (handle->fd != PORT_DELETED) {
r = uv__fs_event_rearm(handle);
if (r != 0)
handle->cb(handle, NULL, 0, r);
}
}
while (handle->fd != PORT_DELETED);
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* path,
unsigned int flags) {
int portfd;
int first_run;
int err;
if (uv__is_active(handle))
return -EINVAL;
first_run = 0;
if (handle->loop->fs_fd == -1) {
portfd = port_create();
if (portfd == -1)
return -errno;
handle->loop->fs_fd = portfd;
first_run = 1;
}
uv__handle_start(handle);
handle->path = strdup(path);
handle->fd = PORT_UNUSED;
handle->cb = cb;
memset(&handle->fo, 0, sizeof handle->fo);
handle->fo.fo_name = handle->path;
err = uv__fs_event_rearm(handle);
if (err != 0)
return err;
if (first_run) {
uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
uv__io_start(handle->loop, &handle->loop->fs_event_watcher, UV__POLLIN);
}
return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
if (!uv__is_active(handle))
return 0;
if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
port_dissociate(handle->loop->fs_fd,
PORT_SOURCE_FILE,
(uintptr_t) &handle->fo);
}
handle->fd = PORT_DELETED;
free(handle->path);
handle->path = NULL;
handle->fo.fo_name = NULL;
uv__handle_stop(handle);
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
}
#else /* !defined(PORT_SOURCE_FILE) */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
return -ENOSYS;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* filename,
unsigned int flags) {
return -ENOSYS;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
return -ENOSYS;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
UNREACHABLE();
}
#endif /* defined(PORT_SOURCE_FILE) */
char** uv_setup_args(int argc, char** argv) {
return argv;
}
int uv_set_process_title(const char* title) {
return 0;
}
int uv_get_process_title(char* buffer, size_t size) {
if (size > 0) {
buffer[0] = '\0';
}
return 0;
}
int uv_resident_set_memory(size_t* rss) {
psinfo_t psinfo;
int err;
int fd;
fd = open("/proc/self/psinfo", O_RDONLY);
if (fd == -1)
return -errno;
/* FIXME(bnoordhuis) Handle EINTR. */
err = -EINVAL;
if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
*rss = (size_t)psinfo.pr_rssize * 1024;
err = 0;
}
uv__close(fd);
return err;
}
int uv_uptime(double* uptime) {
kstat_ctl_t *kc;
kstat_t *ksp;
kstat_named_t *knp;
long hz = sysconf(_SC_CLK_TCK);
kc = kstat_open();
if (kc == NULL)
return -EPERM;
ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
if (kstat_read(kc, ksp, NULL) == -1) {
*uptime = -1;
} else {
knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
*uptime = knp->value.ul / hz;
}
kstat_close(kc);
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
int lookup_instance;
kstat_ctl_t *kc;
kstat_t *ksp;
kstat_named_t *knp;
uv_cpu_info_t* cpu_info;
kc = kstat_open();
if (kc == NULL)
return -EPERM;
/* Get count of cpus */
lookup_instance = 0;
while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
lookup_instance++;
}
*cpu_infos = malloc(lookup_instance * sizeof(**cpu_infos));
if (!(*cpu_infos)) {
kstat_close(kc);
return -ENOMEM;
}
*count = lookup_instance;
cpu_info = *cpu_infos;
lookup_instance = 0;
while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
if (kstat_read(kc, ksp, NULL) == -1) {
cpu_info->speed = 0;
cpu_info->model = NULL;
} else {
knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
assert(knp->data_type == KSTAT_DATA_INT32 ||
knp->data_type == KSTAT_DATA_INT64);
cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
: knp->value.i64;
knp = kstat_data_lookup(ksp, (char*) "brand");
assert(knp->data_type == KSTAT_DATA_STRING);
cpu_info->model = strdup(KSTAT_NAMED_STR_PTR(knp));
}
lookup_instance++;
cpu_info++;
}
cpu_info = *cpu_infos;
lookup_instance = 0;
for (;;) {
ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");
if (ksp == NULL)
break;
if (kstat_read(kc, ksp, NULL) == -1) {
cpu_info->cpu_times.user = 0;
cpu_info->cpu_times.nice = 0;
cpu_info->cpu_times.sys = 0;
cpu_info->cpu_times.idle = 0;
cpu_info->cpu_times.irq = 0;
} else {
knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.user = knp->value.ui64;
knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.sys = knp->value.ui64;
knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.idle = knp->value.ui64;
knp = kstat_data_lookup(ksp, (char*) "intr");
assert(knp->data_type == KSTAT_DATA_UINT64);
cpu_info->cpu_times.irq = knp->value.ui64;
cpu_info->cpu_times.nice = 0;
}
lookup_instance++;
cpu_info++;
}
kstat_close(kc);
return 0;
}
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
int i;
for (i = 0; i < count; i++) {
free(cpu_infos[i].model);
}
free(cpu_infos);
}
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifdef SUNOS_NO_IFADDRS
return -ENOSYS;
#else
uv_interface_address_t* address;
struct sockaddr_dl* sa_addr;
struct ifaddrs* addrs;
struct ifaddrs* ent;
int i;
if (getifaddrs(&addrs))
return -errno;
*count = 0;
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family == PF_PACKET)) {
continue;
}
(*count)++;
}
*addresses = malloc(*count * sizeof(**addresses));
if (!(*addresses))
return -ENOMEM;
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
continue;
if (ent->ifa_addr == NULL)
continue;
address->name = strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
(ent->ifa_flags & IFF_LOOPBACK));
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
(ent->ifa_addr == NULL) ||
(ent->ifa_addr->sa_family != AF_LINK)) {
continue;
}
address = *addresses;
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
#endif /* SUNOS_NO_IFADDRS */
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
free(addresses[i].name);
}
free(addresses);
}


@ -0,0 +1,321 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP);
return 0;
}
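/* Create the handle's socket lazily: if no fd is attached yet, make one
* for the given address family and register it with the stream machinery.
*/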
static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) {
int sockfd;
int err;
if (uv__stream_fd(handle) != -1)
return 0;
err = uv__socket(domain, SOCK_STREAM, 0);
if (err < 0)
return err;
sockfd = err;
err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
if (err) {
uv__close(sockfd);
return err;
}
return 0;
}
int uv__tcp_bind(uv_tcp_t* tcp,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
int err;
int on;
/* Cannot set IPv6-only mode on non-IPv6 socket. */
if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
return -EINVAL;
err = maybe_new_socket(tcp,
addr->sa_family,
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
if (err)
return err;
on = 1;
if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
return -errno;
#ifdef IPV6_V6ONLY
if (addr->sa_family == AF_INET6) {
on = (flags & UV_TCP_IPV6ONLY) != 0;
if (setsockopt(tcp->io_watcher.fd,
IPPROTO_IPV6,
IPV6_V6ONLY,
&on,
sizeof on) == -1) {
return -errno;
}
}
#endif
errno = 0;
if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE)
return -errno;
tcp->delayed_error = -errno;
if (addr->sa_family == AF_INET6)
tcp->flags |= UV_HANDLE_IPV6;
return 0;
}
int uv__tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
uv_connect_cb cb) {
int err;
int r;
assert(handle->type == UV_TCP);
if (handle->connect_req != NULL)
return -EALREADY; /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */
err = maybe_new_socket(handle,
addr->sa_family,
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
if (err)
return err;
handle->delayed_error = 0;
do
r = connect(uv__stream_fd(handle), addr, addrlen);
while (r == -1 && errno == EINTR);
if (r == -1) {
if (errno == EINPROGRESS)
; /* not an error */
else if (errno == ECONNREFUSED)
/* If we get ECONNREFUSED, wait until the next tick to report the
* error. Solaris wants to report immediately; other Unixes want to
* wait.
*/
handle->delayed_error = -errno;
else
return -errno;
}
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
QUEUE_INIT(&req->queue);
handle->connect_req = req;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
if (handle->delayed_error)
uv__io_feed(handle->loop, &handle->io_watcher);
return 0;
}
int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
return uv__stream_open((uv_stream_t*)handle,
sock,
UV_STREAM_READABLE | UV_STREAM_WRITABLE);
}
int uv_tcp_getsockname(const uv_tcp_t* handle,
struct sockaddr* name,
int* namelen) {
socklen_t socklen;
if (handle->delayed_error)
return handle->delayed_error;
if (uv__stream_fd(handle) < 0)
return -EINVAL; /* FIXME(bnoordhuis) -EBADF */
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t) *namelen;
if (getsockname(uv__stream_fd(handle), name, &socklen))
return -errno;
*namelen = (int) socklen;
return 0;
}
int uv_tcp_getpeername(const uv_tcp_t* handle,
struct sockaddr* name,
int* namelen) {
socklen_t socklen;
if (handle->delayed_error)
return handle->delayed_error;
if (uv__stream_fd(handle) < 0)
return -EINVAL; /* FIXME(bnoordhuis) -EBADF */
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t) *namelen;
if (getpeername(uv__stream_fd(handle), name, &socklen))
return -errno;
*namelen = (int) socklen;
return 0;
}
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept = -1;
int err;
if (tcp->delayed_error)
return tcp->delayed_error;
if (single_accept == -1) {
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
}
if (single_accept)
tcp->flags |= UV_TCP_SINGLE_ACCEPT;
err = maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE);
if (err)
return err;
if (listen(tcp->io_watcher.fd, backlog))
return -errno;
tcp->connection_cb = cb;
/* Start listening for connections. */
tcp->io_watcher.cb = uv__server_io;
uv__io_start(tcp->loop, &tcp->io_watcher, UV__POLLIN);
return 0;
}
int uv__tcp_nodelay(int fd, int on) {
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)))
return -errno;
return 0;
}
int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
return -errno;
#ifdef TCP_KEEPIDLE
if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
return -errno;
#endif
/* Solaris/SmartOS, if you don't support keep-alive,
* then don't advertise it in your system headers...
*/
/* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */
#if defined(TCP_KEEPALIVE) && !defined(__sun)
if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
return -errno;
#endif
return 0;
}
int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
int err;
if (uv__stream_fd(handle) != -1) {
err = uv__tcp_nodelay(uv__stream_fd(handle), on);
if (err)
return err;
}
if (on)
handle->flags |= UV_TCP_NODELAY;
else
handle->flags &= ~UV_TCP_NODELAY;
return 0;
}
int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
int err;
if (uv__stream_fd(handle) != -1) {
err = uv__tcp_keepalive(uv__stream_fd(handle), on, delay);
if (err)
return err;
}
if (on)
handle->flags |= UV_TCP_KEEPALIVE;
else
handle->flags &= ~UV_TCP_KEEPALIVE;
/* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
* uv_tcp_t with an int that's almost never used...
*/
return 0;
}
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
if (enable)
handle->flags &= ~UV_TCP_SINGLE_ACCEPT;
else
handle->flags |= UV_TCP_SINGLE_ACCEPT;
return 0;
}
void uv__tcp_close(uv_tcp_t* handle) {
uv__stream_close((uv_stream_t*)handle);
}


@ -0,0 +1,469 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <pthread.h>
#include <assert.h>
#include <errno.h>
#include <sys/time.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
int uv_thread_join(uv_thread_t *tid) {
return -pthread_join(*tid, NULL);
}
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
return -pthread_mutex_init(mutex, NULL);
#else
pthread_mutexattr_t attr;
int err;
if (pthread_mutexattr_init(&attr))
abort();
if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
abort();
err = pthread_mutex_init(mutex, &attr);
if (pthread_mutexattr_destroy(&attr))
abort();
return -err;
#endif
}
void uv_mutex_destroy(uv_mutex_t* mutex) {
if (pthread_mutex_destroy(mutex))
abort();
}
void uv_mutex_lock(uv_mutex_t* mutex) {
if (pthread_mutex_lock(mutex))
abort();
}
int uv_mutex_trylock(uv_mutex_t* mutex) {
int err;
/* FIXME(bnoordhuis) EAGAIN means recursive lock limit reached. Arguably
* a bug, should probably abort rather than return -EAGAIN.
*/
err = pthread_mutex_trylock(mutex);
if (err && err != EBUSY && err != EAGAIN)
abort();
return -err;
}
void uv_mutex_unlock(uv_mutex_t* mutex) {
if (pthread_mutex_unlock(mutex))
abort();
}
int uv_rwlock_init(uv_rwlock_t* rwlock) {
return -pthread_rwlock_init(rwlock, NULL);
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
if (pthread_rwlock_destroy(rwlock))
abort();
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_rdlock(rwlock))
abort();
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
int err;
err = pthread_rwlock_tryrdlock(rwlock);
if (err && err != EBUSY && err != EAGAIN)
abort();
return -err;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_unlock(rwlock))
abort();
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_wrlock(rwlock))
abort();
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
int err;
err = pthread_rwlock_trywrlock(rwlock);
if (err && err != EBUSY && err != EAGAIN)
abort();
return -err;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (pthread_rwlock_unlock(rwlock))
abort();
}
void uv_once(uv_once_t* guard, void (*callback)(void)) {
if (pthread_once(guard, callback))
abort();
}
#if defined(__APPLE__) && defined(__MACH__)
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
kern_return_t err;
err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
if (err == KERN_SUCCESS)
return 0;
if (err == KERN_INVALID_ARGUMENT)
return -EINVAL;
if (err == KERN_RESOURCE_SHORTAGE)
return -ENOMEM;
abort();
return -EINVAL; /* Satisfy the compiler. */
}
void uv_sem_destroy(uv_sem_t* sem) {
if (semaphore_destroy(mach_task_self(), *sem))
abort();
}
void uv_sem_post(uv_sem_t* sem) {
if (semaphore_signal(*sem))
abort();
}
void uv_sem_wait(uv_sem_t* sem) {
int r;
do
r = semaphore_wait(*sem);
while (r == KERN_ABORTED);
if (r != KERN_SUCCESS)
abort();
}
int uv_sem_trywait(uv_sem_t* sem) {
mach_timespec_t interval;
kern_return_t err;
interval.tv_sec = 0;
interval.tv_nsec = 0;
err = semaphore_timedwait(*sem, interval);
if (err == KERN_SUCCESS)
return 0;
if (err == KERN_OPERATION_TIMED_OUT)
return -EAGAIN;
abort();
return -EINVAL; /* Satisfy the compiler. */
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
if (sem_init(sem, 0, value))
return -errno;
return 0;
}
void uv_sem_destroy(uv_sem_t* sem) {
if (sem_destroy(sem))
abort();
}
void uv_sem_post(uv_sem_t* sem) {
if (sem_post(sem))
abort();
}
void uv_sem_wait(uv_sem_t* sem) {
int r;
do
r = sem_wait(sem);
while (r == -1 && errno == EINTR);
if (r)
abort();
}
int uv_sem_trywait(uv_sem_t* sem) {
int r;
do
r = sem_trywait(sem);
while (r == -1 && errno == EINTR);
if (r) {
if (errno == EAGAIN)
return -EAGAIN;
abort();
}
return 0;
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
#if defined(__APPLE__) && defined(__MACH__)
int uv_cond_init(uv_cond_t* cond) {
return -pthread_cond_init(cond, NULL);
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
int uv_cond_init(uv_cond_t* cond) {
pthread_condattr_t attr;
int err;
err = pthread_condattr_init(&attr);
if (err)
return -err;
#if !defined(__ANDROID__)
err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
if (err)
goto error2;
#endif
err = pthread_cond_init(cond, &attr);
if (err)
goto error2;
err = pthread_condattr_destroy(&attr);
if (err)
goto error;
return 0;
error:
pthread_cond_destroy(cond);
error2:
pthread_condattr_destroy(&attr);
return -err;
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
void uv_cond_destroy(uv_cond_t* cond) {
if (pthread_cond_destroy(cond))
abort();
}
void uv_cond_signal(uv_cond_t* cond) {
if (pthread_cond_signal(cond))
abort();
}
void uv_cond_broadcast(uv_cond_t* cond) {
if (pthread_cond_broadcast(cond))
abort();
}
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
if (pthread_cond_wait(cond, mutex))
abort();
}
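/* The timeout is relative and expressed in nanoseconds. */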
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
int r;
struct timespec ts;
#if defined(__APPLE__) && defined(__MACH__)
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
timeout += uv__hrtime(UV_CLOCK_PRECISE);
ts.tv_sec = timeout / NANOSEC;
ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID__)
/*
* The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
* but has this alternative function instead.
*/
r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID__ */
#endif
if (r == 0)
return 0;
if (r == ETIMEDOUT)
return -ETIMEDOUT;
abort();
return -EINVAL; /* Satisfy the compiler. */
}
#if defined(__APPLE__) && defined(__MACH__)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
int err;
barrier->n = count;
barrier->count = 0;
err = uv_mutex_init(&barrier->mutex);
if (err)
return -err;
err = uv_sem_init(&barrier->turnstile1, 0);
if (err)
goto error2;
err = uv_sem_init(&barrier->turnstile2, 1);
if (err)
goto error;
return 0;
error:
uv_sem_destroy(&barrier->turnstile1);
error2:
uv_mutex_destroy(&barrier->mutex);
return -err;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
uv_sem_destroy(&barrier->turnstile2);
uv_sem_destroy(&barrier->turnstile1);
uv_mutex_destroy(&barrier->mutex);
}
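/* Classic two-turnstile barrier built from a mutex and two semaphores:
 * turnstile1 releases the waiters once the last thread arrives and
 * turnstile2 resets the barrier so it can be reused. Exactly one caller,
 * the last one to leave, gets a non-zero return value, mirroring
 * PTHREAD_BARRIER_SERIAL_THREAD on platforms with pthread barriers.
 */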
int uv_barrier_wait(uv_barrier_t* barrier) {
int serial_thread;
uv_mutex_lock(&barrier->mutex);
if (++barrier->count == barrier->n) {
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile1);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile1);
uv_mutex_lock(&barrier->mutex);
serial_thread = (--barrier->count == 0);
if (serial_thread) {
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile2);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile2);
return serial_thread;
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return -pthread_barrier_init(barrier, NULL, count);
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int r = pthread_barrier_wait(barrier);
if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return r == PTHREAD_BARRIER_SERIAL_THREAD;
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
int uv_key_create(uv_key_t* key) {
return -pthread_key_create(key, NULL);
}
void uv_key_delete(uv_key_t* key) {
if (pthread_key_delete(*key))
abort();
}
void* uv_key_get(uv_key_t* key) {
return pthread_getspecific(*key);
}
void uv_key_set(uv_key_t* key, void* value) {
if (pthread_setspecific(*key, value))
abort();
}

View File

@ -0,0 +1,169 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include "heap-inl.h"
#include <assert.h>
#include <limits.h>
static int timer_less_than(const struct heap_node* ha,
const struct heap_node* hb) {
const uv_timer_t* a;
const uv_timer_t* b;
a = container_of(ha, const uv_timer_t, heap_node);
b = container_of(hb, const uv_timer_t, heap_node);
if (a->timeout < b->timeout)
return 1;
if (b->timeout < a->timeout)
return 0;
/* Compare start_id when both have the same timeout. start_id is
* allocated with loop->timer_counter in uv_timer_start().
*/
if (a->start_id < b->start_id)
return 1;
if (b->start_id < a->start_id)
return 0;
return 0;
}
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
handle->timer_cb = NULL;
handle->repeat = 0;
return 0;
}
int uv_timer_start(uv_timer_t* handle,
uv_timer_cb cb,
uint64_t timeout,
uint64_t repeat) {
uint64_t clamped_timeout;
if (uv__is_active(handle))
uv_timer_stop(handle);
clamped_timeout = handle->loop->time + timeout;
if (clamped_timeout < timeout)
clamped_timeout = (uint64_t) -1;
handle->timer_cb = cb;
handle->timeout = clamped_timeout;
handle->repeat = repeat;
  /* start_id is the second index to be compared in timer_less_than() */
handle->start_id = handle->loop->timer_counter++;
heap_insert((struct heap*) &handle->loop->timer_heap,
(struct heap_node*) &handle->heap_node,
timer_less_than);
uv__handle_start(handle);
return 0;
}
int uv_timer_stop(uv_timer_t* handle) {
if (!uv__is_active(handle))
return 0;
heap_remove((struct heap*) &handle->loop->timer_heap,
(struct heap_node*) &handle->heap_node,
timer_less_than);
uv__handle_stop(handle);
return 0;
}
int uv_timer_again(uv_timer_t* handle) {
if (handle->timer_cb == NULL)
return -EINVAL;
if (handle->repeat) {
uv_timer_stop(handle);
uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
}
return 0;
}
void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
handle->repeat = repeat;
}
uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
return handle->repeat;
}
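/* Compute the poll timeout in milliseconds: 0 if a timer has already
 * expired, -1 (block indefinitely) if no timers are active, otherwise the
 * time until the earliest timer fires, clamped to INT_MAX.
 */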
int uv__next_timeout(const uv_loop_t* loop) {
const struct heap_node* heap_node;
const uv_timer_t* handle;
uint64_t diff;
heap_node = heap_min((const struct heap*) &loop->timer_heap);
if (heap_node == NULL)
return -1; /* block indefinitely */
handle = container_of(heap_node, const uv_timer_t, heap_node);
if (handle->timeout <= loop->time)
return 0;
diff = handle->timeout - loop->time;
if (diff > INT_MAX)
diff = INT_MAX;
return diff;
}
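/* Run the callbacks of all timers that have expired. Each handle is
 * stopped first and, if it has a repeat interval, rearmed via
 * uv_timer_again() before its callback runs, so the callback may freely
 * stop or restart the handle.
 */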
void uv__run_timers(uv_loop_t* loop) {
struct heap_node* heap_node;
uv_timer_t* handle;
for (;;) {
heap_node = heap_min((struct heap*) &loop->timer_heap);
if (heap_node == NULL)
break;
handle = container_of(heap_node, uv_timer_t, heap_node);
if (handle->timeout > loop->time)
break;
uv_timer_stop(handle);
uv_timer_again(handle);
handle->timer_cb(handle);
}
}
void uv__timer_close(uv_timer_t* handle) {
uv_timer_stop(handle);
}

View File

@ -0,0 +1,229 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include "spinlock.h"
#include <assert.h>
#include <unistd.h>
#include <termios.h>
#include <errno.h>
#include <sys/ioctl.h>
static int orig_termios_fd = -1;
static struct termios orig_termios;
static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
int flags;
int newfd;
int r;
flags = 0;
newfd = -1;
uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
/* Reopen the file descriptor when it refers to a tty. This lets us put the
* tty in non-blocking mode without affecting other processes that share it
* with us.
*
* Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also
* affects fd 1 of `cat` because both file descriptors refer to the same
* struct file in the kernel. When we reopen our fd 0, it points to a
* different struct file, hence changing its properties doesn't affect
* other processes.
*/
if (isatty(fd)) {
r = uv__open_cloexec("/dev/tty", O_RDWR);
if (r < 0) {
/* fallback to using blocking writes */
if (!readable)
flags |= UV_STREAM_BLOCKING;
goto skip;
}
newfd = r;
r = uv__dup2_cloexec(newfd, fd);
if (r < 0 && r != -EINVAL) {
/* EINVAL means newfd == fd which could conceivably happen if another
* thread called close(fd) between our calls to isatty() and open().
* That's a rather unlikely event but let's handle it anyway.
*/
uv__close(newfd);
return r;
}
fd = newfd;
}
skip:
#if defined(__APPLE__)
r = uv__stream_try_select((uv_stream_t*) tty, &fd);
if (r) {
if (newfd != -1)
uv__close(newfd);
return r;
}
#endif
if (readable)
flags |= UV_STREAM_READABLE;
else
flags |= UV_STREAM_WRITABLE;
if (!(flags & UV_STREAM_BLOCKING))
uv__nonblock(fd, 1);
uv__stream_open((uv_stream_t*) tty, fd, flags);
tty->mode = 0;
return 0;
}
int uv_tty_set_mode(uv_tty_t* tty, int mode) {
struct termios raw;
int fd;
fd = uv__stream_fd(tty);
if (mode && tty->mode == 0) { /* on */
if (tcgetattr(fd, &tty->orig_termios))
return -errno;
/* This is used for uv_tty_reset_mode() */
uv_spinlock_lock(&termios_spinlock);
if (orig_termios_fd == -1) {
orig_termios = tty->orig_termios;
orig_termios_fd = fd;
}
uv_spinlock_unlock(&termios_spinlock);
raw = tty->orig_termios;
raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
raw.c_oflag |= (ONLCR);
raw.c_cflag |= (CS8);
raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
raw.c_cc[VMIN] = 1;
raw.c_cc[VTIME] = 0;
/* Put terminal in raw mode after draining */
if (tcsetattr(fd, TCSADRAIN, &raw))
return -errno;
tty->mode = 1;
} else if (mode == 0 && tty->mode) { /* off */
/* Put terminal in original mode after flushing */
if (tcsetattr(fd, TCSAFLUSH, &tty->orig_termios))
return -errno;
tty->mode = 0;
}
return 0;
}
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
struct winsize ws;
if (ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws))
return -errno;
*width = ws.ws_col;
*height = ws.ws_row;
return 0;
}
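/* Best-effort classification of a file descriptor: ttys are detected with
 * isatty(), regular files and character devices map to UV_FILE, FIFOs to
 * UV_NAMED_PIPE, and sockets are classified by their SO_TYPE and address
 * family (UDP for SOCK_DGRAM, TCP or a pipe for SOCK_STREAM).
 */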
uv_handle_type uv_guess_handle(uv_file file) {
struct sockaddr sa;
struct stat s;
socklen_t len;
int type;
if (file < 0)
return UV_UNKNOWN_HANDLE;
if (isatty(file))
return UV_TTY;
if (fstat(file, &s))
return UV_UNKNOWN_HANDLE;
if (S_ISREG(s.st_mode))
return UV_FILE;
if (S_ISCHR(s.st_mode))
return UV_FILE; /* XXX UV_NAMED_PIPE? */
if (S_ISFIFO(s.st_mode))
return UV_NAMED_PIPE;
if (!S_ISSOCK(s.st_mode))
return UV_UNKNOWN_HANDLE;
len = sizeof(type);
if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
return UV_UNKNOWN_HANDLE;
len = sizeof(sa);
if (getsockname(file, &sa, &len))
return UV_UNKNOWN_HANDLE;
if (type == SOCK_DGRAM)
if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
return UV_UDP;
if (type == SOCK_STREAM) {
if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
return UV_TCP;
if (sa.sa_family == AF_UNIX)
return UV_NAMED_PIPE;
}
return UV_UNKNOWN_HANDLE;
}
/* This function is async signal-safe, meaning that it's safe to call from
* inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s
* critical section when the signal was raised.
*/
int uv_tty_reset_mode(void) {
int err;
if (!uv_spinlock_trylock(&termios_spinlock))
return -EBUSY; /* In uv_tty_set_mode(). */
err = 0;
if (orig_termios_fd != -1)
if (tcsetattr(orig_termios_fd, TCSANOW, &orig_termios))
err = -errno;
uv_spinlock_unlock(&termios_spinlock);
return err;
}

View File

@ -0,0 +1,758 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
#endif
#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif
static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_udp_t* handle);
static void uv__udp_sendmsg(uv_udp_t* handle);
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
int domain,
unsigned int flags);
void uv__udp_close(uv_udp_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher);
uv__handle_stop(handle);
if (handle->io_watcher.fd != -1) {
uv__close(handle->io_watcher.fd);
handle->io_watcher.fd = -1;
}
}
void uv__udp_finish_close(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
assert(handle->io_watcher.fd == -1);
while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
QUEUE_REMOVE(q);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req->status = -ECANCELED;
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
}
uv__udp_run_completed(handle);
assert(handle->send_queue_size == 0);
assert(handle->send_queue_count == 0);
/* Now tear down the handle. */
handle->recv_cb = NULL;
handle->alloc_cb = NULL;
/* but _do not_ touch close_cb */
}
static void uv__udp_run_completed(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
q = QUEUE_HEAD(&handle->write_completed_queue);
QUEUE_REMOVE(q);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
uv__req_unregister(handle->loop, req);
handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
handle->send_queue_count--;
if (req->bufs != req->bufsml)
free(req->bufs);
req->bufs = NULL;
if (req->send_cb == NULL)
continue;
/* req->status >= 0 == bytes written
* req->status < 0 == errno
*/
if (req->status >= 0)
req->send_cb(req, 0);
else
req->send_cb(req, req->status);
}
if (QUEUE_EMPTY(&handle->write_queue)) {
/* Pending queue and completion queue empty, stop watcher. */
uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLOUT);
if (!uv__io_active(&handle->io_watcher, UV__POLLIN))
uv__handle_stop(handle);
}
}
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
uv_udp_t* handle;
handle = container_of(w, uv_udp_t, io_watcher);
assert(handle->type == UV_UDP);
if (revents & UV__POLLIN)
uv__udp_recvmsg(handle);
if (revents & UV__POLLOUT) {
uv__udp_sendmsg(handle);
uv__udp_run_completed(handle);
}
}
static void uv__udp_recvmsg(uv_udp_t* handle) {
struct sockaddr_storage peer;
struct msghdr h;
ssize_t nread;
uv_buf_t buf;
int flags;
int count;
assert(handle->recv_cb != NULL);
assert(handle->alloc_cb != NULL);
/* Prevent loop starvation when the data comes in as fast as (or faster than)
* we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
*/
count = 32;
memset(&h, 0, sizeof(h));
h.msg_name = &peer;
do {
handle->alloc_cb((uv_handle_t*) handle, 64 * 1024, &buf);
if (buf.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
return;
}
assert(buf.base != NULL);
h.msg_namelen = sizeof(peer);
h.msg_iov = (void*) &buf;
h.msg_iovlen = 1;
do {
nread = recvmsg(handle->io_watcher.fd, &h, 0);
}
while (nread == -1 && errno == EINTR);
if (nread == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
handle->recv_cb(handle, 0, &buf, NULL, 0);
else
handle->recv_cb(handle, -errno, &buf, NULL, 0);
}
else {
const struct sockaddr *addr;
if (h.msg_namelen == 0)
addr = NULL;
else
addr = (const struct sockaddr*) &peer;
flags = 0;
if (h.msg_flags & MSG_TRUNC)
flags |= UV_UDP_PARTIAL;
handle->recv_cb(handle, nread, &buf, addr, flags);
}
}
/* recv_cb callback may decide to pause or close the handle */
while (nread != -1
&& count-- > 0
&& handle->io_watcher.fd != -1
&& handle->recv_cb != NULL);
}
static void uv__udp_sendmsg(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
struct msghdr h;
ssize_t size;
while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
assert(req != NULL);
memset(&h, 0, sizeof h);
h.msg_name = &req->addr;
h.msg_namelen = (req->addr.ss_family == AF_INET6 ?
sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
h.msg_iov = (struct iovec*) req->bufs;
h.msg_iovlen = req->nbufs;
do {
size = sendmsg(handle->io_watcher.fd, &h, 0);
} while (size == -1 && errno == EINTR);
if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
break;
req->status = (size == -1 ? -errno : size);
/* Sending a datagram is an atomic operation: either all data
* is written or nothing is (and EMSGSIZE is raised). That is
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
}
/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
* refinements for programs that use multicast.
*
* Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
 * are different from the BSDs: it _shares_ the port rather than stealing it
* from the current listener. While useful, it's not something we can emulate
* on other platforms so we don't enable it.
*/
static int uv__set_reuse(int fd) {
int yes;
#if defined(SO_REUSEPORT) && !defined(__linux__)
yes = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return -errno;
#else
yes = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
return -errno;
#endif
return 0;
}
int uv__udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
int err;
int yes;
int fd;
err = -EINVAL;
fd = -1;
/* Check for bad flags. */
if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
return -EINVAL;
/* Cannot set IPv6-only mode on non-IPv6 socket. */
if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
return -EINVAL;
fd = handle->io_watcher.fd;
if (fd == -1) {
err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
if (err < 0)
return err;
fd = err;
handle->io_watcher.fd = fd;
}
if (flags & UV_UDP_REUSEADDR) {
err = uv__set_reuse(fd);
if (err)
goto out;
}
if (flags & UV_UDP_IPV6ONLY) {
#ifdef IPV6_V6ONLY
yes = 1;
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
err = -errno;
goto out;
}
#else
err = -ENOTSUP;
goto out;
#endif
}
if (bind(fd, addr, addrlen)) {
err = -errno;
goto out;
}
if (addr->sa_family == AF_INET6)
handle->flags |= UV_HANDLE_IPV6;
return 0;
out:
uv__close(handle->io_watcher.fd);
handle->io_watcher.fd = -1;
return err;
}
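/* Bind the handle to the wildcard address of the given family if it does
 * not have a socket yet. Called from the send and recv_start paths so that
 * an explicit uv_udp_bind() is optional.
 */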
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
int domain,
unsigned int flags) {
unsigned char taddr[sizeof(struct sockaddr_in6)];
socklen_t addrlen;
if (handle->io_watcher.fd != -1)
return 0;
switch (domain) {
case AF_INET:
{
struct sockaddr_in* addr = (void*)&taddr;
memset(addr, 0, sizeof *addr);
addr->sin_family = AF_INET;
addr->sin_addr.s_addr = INADDR_ANY;
addrlen = sizeof *addr;
break;
}
case AF_INET6:
{
struct sockaddr_in6* addr = (void*)&taddr;
memset(addr, 0, sizeof *addr);
addr->sin6_family = AF_INET6;
addr->sin6_addr = in6addr_any;
addrlen = sizeof *addr;
break;
}
default:
assert(0 && "unsupported address family");
abort();
}
return uv__udp_bind(handle, (const struct sockaddr*) &taddr, addrlen, flags);
}
int uv__udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
unsigned int addrlen,
uv_udp_send_cb send_cb) {
int err;
int empty_queue;
assert(nbufs > 0);
err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
if (err)
return err;
/* It's legal for send_queue_count > 0 even when the write_queue is empty;
* it means there are error-state requests in the write_completed_queue that
* will touch up send_queue_size/count later.
*/
empty_queue = (handle->send_queue_count == 0);
uv__req_init(handle->loop, req, UV_UDP_SEND);
assert(addrlen <= sizeof(req->addr));
memcpy(&req->addr, addr, addrlen);
req->send_cb = send_cb;
req->handle = handle;
req->nbufs = nbufs;
req->bufs = req->bufsml;
if (nbufs > ARRAY_SIZE(req->bufsml))
req->bufs = malloc(nbufs * sizeof(bufs[0]));
if (req->bufs == NULL)
return -ENOMEM;
memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
handle->send_queue_count++;
QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
uv__handle_start(handle);
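  /* If the outbound queue was empty, try to flush it right away; otherwise
   * just wait for the file descriptor to become writable.
   */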
if (empty_queue)
uv__udp_sendmsg(handle);
else
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
return 0;
}
int uv__udp_try_send(uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
unsigned int addrlen) {
int err;
struct msghdr h;
ssize_t size;
assert(nbufs > 0);
/* already sending a message */
if (handle->send_queue_count != 0)
return -EAGAIN;
err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
if (err)
return err;
memset(&h, 0, sizeof h);
h.msg_name = (struct sockaddr*) addr;
h.msg_namelen = addrlen;
h.msg_iov = (struct iovec*) bufs;
h.msg_iovlen = nbufs;
do {
size = sendmsg(handle->io_watcher.fd, &h, 0);
} while (size == -1 && errno == EINTR);
if (size == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
return -EAGAIN;
else
return -errno;
}
return size;
}
static int uv__udp_set_membership4(uv_udp_t* handle,
const struct sockaddr_in* multicast_addr,
const char* interface_addr,
uv_membership membership) {
struct ip_mreq mreq;
int optname;
int err;
memset(&mreq, 0, sizeof mreq);
if (interface_addr) {
err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
if (err)
return err;
} else {
mreq.imr_interface.s_addr = htonl(INADDR_ANY);
}
mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
switch (membership) {
case UV_JOIN_GROUP:
optname = IP_ADD_MEMBERSHIP;
break;
case UV_LEAVE_GROUP:
optname = IP_DROP_MEMBERSHIP;
break;
default:
return -EINVAL;
}
if (setsockopt(handle->io_watcher.fd,
IPPROTO_IP,
optname,
&mreq,
sizeof(mreq))) {
return -errno;
}
return 0;
}
static int uv__udp_set_membership6(uv_udp_t* handle,
const struct sockaddr_in6* multicast_addr,
const char* interface_addr,
uv_membership membership) {
int optname;
struct ipv6_mreq mreq;
struct sockaddr_in6 addr6;
memset(&mreq, 0, sizeof mreq);
if (interface_addr) {
if (uv_ip6_addr(interface_addr, 0, &addr6))
return -EINVAL;
mreq.ipv6mr_interface = addr6.sin6_scope_id;
} else {
mreq.ipv6mr_interface = 0;
}
mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
switch (membership) {
case UV_JOIN_GROUP:
optname = IPV6_ADD_MEMBERSHIP;
break;
case UV_LEAVE_GROUP:
optname = IPV6_DROP_MEMBERSHIP;
break;
default:
return -EINVAL;
}
if (setsockopt(handle->io_watcher.fd,
IPPROTO_IPV6,
optname,
&mreq,
sizeof(mreq))) {
return -errno;
}
return 0;
}
int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
handle->alloc_cb = NULL;
handle->recv_cb = NULL;
handle->send_queue_size = 0;
handle->send_queue_count = 0;
uv__io_init(&handle->io_watcher, uv__udp_io, -1);
QUEUE_INIT(&handle->write_queue);
QUEUE_INIT(&handle->write_completed_queue);
return 0;
}
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
int err;
/* Check for already active socket. */
if (handle->io_watcher.fd != -1)
return -EALREADY; /* FIXME(bnoordhuis) Should be -EBUSY. */
err = uv__set_reuse(sock);
if (err)
return err;
handle->io_watcher.fd = sock;
return 0;
}
int uv_udp_set_membership(uv_udp_t* handle,
const char* multicast_addr,
const char* interface_addr,
uv_membership membership) {
int err;
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
if (err)
return err;
return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
} else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
if (err)
return err;
return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
} else {
return -EINVAL;
}
}
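/* Some platforms (Solaris, AIX) take the IP_MULTICAST_TTL/LOOP option
 * value as a char rather than an int, hence the per-platform argument type
 * below.
 */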
static int uv__setsockopt_maybe_char(uv_udp_t* handle, int option, int val) {
#if defined(__sun) || defined(_AIX)
char arg = val;
#else
int arg = val;
#endif
if (val < 0 || val > 255)
return -EINVAL;
if (setsockopt(handle->io_watcher.fd, IPPROTO_IP, option, &arg, sizeof(arg)))
return -errno;
return 0;
}
int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
if (setsockopt(handle->io_watcher.fd,
SOL_SOCKET,
SO_BROADCAST,
&on,
sizeof(on))) {
return -errno;
}
return 0;
}
int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
if (ttl < 1 || ttl > 255)
return -EINVAL;
if (setsockopt(handle->io_watcher.fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)))
return -errno;
return 0;
}
int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
return uv__setsockopt_maybe_char(handle, IP_MULTICAST_TTL, ttl);
}
int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
return uv__setsockopt_maybe_char(handle, IP_MULTICAST_LOOP, on);
}
int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
struct sockaddr_storage addr_st;
struct sockaddr_in* addr4;
struct sockaddr_in6* addr6;
addr4 = (struct sockaddr_in*) &addr_st;
addr6 = (struct sockaddr_in6*) &addr_st;
if (!interface_addr) {
memset(&addr_st, 0, sizeof addr_st);
if (handle->flags & UV_HANDLE_IPV6) {
addr_st.ss_family = AF_INET6;
addr6->sin6_scope_id = 0;
} else {
addr_st.ss_family = AF_INET;
addr4->sin_addr.s_addr = htonl(INADDR_ANY);
}
} else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
/* nothing, address was parsed */
} else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
/* nothing, address was parsed */
} else {
return -EINVAL;
}
if (addr_st.ss_family == AF_INET) {
if (setsockopt(handle->io_watcher.fd,
IPPROTO_IP,
IP_MULTICAST_IF,
(void*) &addr4->sin_addr,
sizeof(addr4->sin_addr)) == -1) {
return -errno;
}
} else if (addr_st.ss_family == AF_INET6) {
if (setsockopt(handle->io_watcher.fd,
IPPROTO_IPV6,
IPV6_MULTICAST_IF,
&addr6->sin6_scope_id,
sizeof(addr6->sin6_scope_id)) == -1) {
return -errno;
}
} else {
assert(0 && "unexpected address family");
abort();
}
return 0;
}
int uv_udp_getsockname(const uv_udp_t* handle,
struct sockaddr* name,
int* namelen) {
socklen_t socklen;
if (handle->io_watcher.fd == -1)
return -EINVAL; /* FIXME(bnoordhuis) -EBADF */
/* sizeof(socklen_t) != sizeof(int) on some systems. */
socklen = (socklen_t) *namelen;
if (getsockname(handle->io_watcher.fd, name, &socklen))
return -errno;
*namelen = (int) socklen;
return 0;
}
int uv__udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb) {
int err;
if (alloc_cb == NULL || recv_cb == NULL)
return -EINVAL;
if (uv__io_active(&handle->io_watcher, UV__POLLIN))
return -EALREADY; /* FIXME(bnoordhuis) Should be -EBUSY. */
err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
if (err)
return err;
handle->alloc_cb = alloc_cb;
handle->recv_cb = recv_cb;
uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN);
uv__handle_start(handle);
return 0;
}
int uv__udp_recv_stop(uv_udp_t* handle) {
uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLIN);
if (!uv__io_active(&handle->io_watcher, UV__POLLOUT))
uv__handle_stop(handle);
handle->alloc_cb = NULL;
handle->recv_cb = NULL;
return 0;
}

View File

@ -0,0 +1,25 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
provider uv {
probe tick__start(void* loop, int mode);
probe tick__stop(void* loop, int mode);
};

View File

@ -0,0 +1,491 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
* include any headers.
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include "uv.h"
#include "uv-common.h"
#include <stdio.h>
#include <assert.h>
#include <stddef.h> /* NULL */
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */
#if !defined(_WIN32)
# include <net/if.h> /* if_nametoindex */
#endif
/* EAI_* constants. */
#if !defined(_WIN32)
# include <sys/types.h>
# include <sys/socket.h>
# include <netdb.h>
#endif
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
size_t uv_handle_size(uv_handle_type type) {
switch (type) {
UV_HANDLE_TYPE_MAP(XX)
default:
return -1;
}
}
size_t uv_req_size(uv_req_type type) {
switch(type) {
UV_REQ_TYPE_MAP(XX)
default:
return -1;
}
}
#undef XX
size_t uv_loop_size(void) {
return sizeof(uv_loop_t);
}
uv_buf_t uv_buf_init(char* base, unsigned int len) {
uv_buf_t buf;
buf.base = base;
buf.len = len;
return buf;
}
#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
const char* uv_err_name(int err) {
switch (err) {
UV_ERRNO_MAP(UV_ERR_NAME_GEN)
default:
assert(0);
return NULL;
}
}
#undef UV_ERR_NAME_GEN
#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
const char* uv_strerror(int err) {
switch (err) {
UV_ERRNO_MAP(UV_STRERROR_GEN)
default:
return "Unknown system error";
}
}
#undef UV_STRERROR_GEN
int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
memset(addr, 0, sizeof(*addr));
addr->sin_family = AF_INET;
addr->sin_port = htons(port);
return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}
int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
char address_part[40];
size_t address_part_size;
const char* zone_index;
memset(addr, 0, sizeof(*addr));
addr->sin6_family = AF_INET6;
addr->sin6_port = htons(port);
zone_index = strchr(ip, '%');
if (zone_index != NULL) {
address_part_size = zone_index - ip;
if (address_part_size >= sizeof(address_part))
address_part_size = sizeof(address_part) - 1;
memcpy(address_part, ip, address_part_size);
address_part[address_part_size] = '\0';
ip = address_part;
zone_index++; /* skip '%' */
/* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
addr->sin6_scope_id = atoi(zone_index);
#else
addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
}
return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}
int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}
int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}
int uv_tcp_bind(uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int flags) {
unsigned int addrlen;
if (handle->type != UV_TCP)
return UV_EINVAL;
if (addr->sa_family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (addr->sa_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
else
return UV_EINVAL;
return uv__tcp_bind(handle, addr, addrlen, flags);
}
int uv_udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int flags) {
unsigned int addrlen;
if (handle->type != UV_UDP)
return UV_EINVAL;
if (addr->sa_family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (addr->sa_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
else
return UV_EINVAL;
return uv__udp_bind(handle, addr, addrlen, flags);
}
int uv_tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
uv_connect_cb cb) {
unsigned int addrlen;
if (handle->type != UV_TCP)
return UV_EINVAL;
if (addr->sa_family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (addr->sa_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
else
return UV_EINVAL;
return uv__tcp_connect(req, handle, addr, addrlen, cb);
}
int uv_udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
uv_udp_send_cb send_cb) {
unsigned int addrlen;
if (handle->type != UV_UDP)
return UV_EINVAL;
if (addr->sa_family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (addr->sa_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
else
return UV_EINVAL;
return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
}
int uv_udp_try_send(uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr) {
unsigned int addrlen;
if (handle->type != UV_UDP)
return UV_EINVAL;
if (addr->sa_family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (addr->sa_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
else
return UV_EINVAL;
return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
}
int uv_udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb) {
if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
return UV_EINVAL;
else
return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}
int uv_udp_recv_stop(uv_udp_t* handle) {
if (handle->type != UV_UDP)
return UV_EINVAL;
else
return uv__udp_recv_stop(handle);
}
struct thread_ctx {
void (*entry)(void* arg);
void* arg;
};
#ifdef _WIN32
static UINT __stdcall uv__thread_start(void* arg)
#else
static void* uv__thread_start(void *arg)
#endif
{
struct thread_ctx *ctx_p;
struct thread_ctx ctx;
ctx_p = arg;
ctx = *ctx_p;
free(ctx_p);
ctx.entry(ctx.arg);
return 0;
}
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
struct thread_ctx* ctx;
int err;
ctx = malloc(sizeof(*ctx));
if (ctx == NULL)
return UV_ENOMEM;
ctx->entry = entry;
ctx->arg = arg;
#ifdef _WIN32
*tid = (HANDLE) _beginthreadex(NULL, 0, uv__thread_start, ctx, 0, NULL);
err = *tid ? 0 : errno;
#else
err = pthread_create(tid, NULL, uv__thread_start, ctx);
#endif
if (err)
free(ctx);
return err ? -1 : 0;
}
unsigned long uv_thread_self(void) {
#ifdef _WIN32
return (unsigned long) GetCurrentThreadId();
#else
return (unsigned long) pthread_self();
#endif
}
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
QUEUE* q;
uv_handle_t* h;
QUEUE_FOREACH(q, &loop->handle_queue) {
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
if (h->flags & UV__HANDLE_INTERNAL) continue;
walk_cb(h, arg);
}
}
#ifndef NDEBUG
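/* Debug helper: dump all (or only the active) handles of a loop to stderr.
 * The [RAI] column shows whether a handle is Referenced, Active and
 * Internal; a dash means the corresponding flag is not set.
 */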
static void uv__print_handles(uv_loop_t* loop, int only_active) {
const char* type;
QUEUE* q;
uv_handle_t* h;
if (loop == NULL)
loop = uv_default_loop();
QUEUE_FOREACH(q, &loop->handle_queue) {
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
if (only_active && !uv__is_active(h))
continue;
switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
UV_HANDLE_TYPE_MAP(X)
#undef X
default: type = "<unknown>";
}
fprintf(stderr,
"[%c%c%c] %-8s %p\n",
"R-"[!(h->flags & UV__HANDLE_REF)],
"A-"[!(h->flags & UV__HANDLE_ACTIVE)],
"I-"[!(h->flags & UV__HANDLE_INTERNAL)],
type,
(void*)h);
}
}
void uv_print_all_handles(uv_loop_t* loop) {
uv__print_handles(loop, 0);
}
void uv_print_active_handles(uv_loop_t* loop) {
uv__print_handles(loop, 1);
}
#endif
void uv_ref(uv_handle_t* handle) {
uv__handle_ref(handle);
}
void uv_unref(uv_handle_t* handle) {
uv__handle_unref(handle);
}
int uv_has_ref(const uv_handle_t* handle) {
return uv__has_ref(handle);
}
void uv_stop(uv_loop_t* loop) {
loop->stop_flag = 1;
}
uint64_t uv_now(const uv_loop_t* loop) {
return loop->time;
}
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
unsigned int i;
size_t bytes;
bytes = 0;
for (i = 0; i < nbufs; i++)
bytes += (size_t) bufs[i].len;
return bytes;
}
int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
return uv__socket_sockopt(handle, SO_RCVBUF, value);
}
int uv_send_buffer_size(uv_handle_t* handle, int *value) {
return uv__socket_sockopt(handle, SO_SNDBUF, value);
}
int uv_fs_event_getpath(uv_fs_event_t* handle, char* buf, size_t* len) {
size_t required_len;
if (!uv__is_active(handle)) {
*len = 0;
return UV_EINVAL;
}
required_len = strlen(handle->path) + 1;
if (required_len > *len) {
*len = required_len;
return UV_ENOBUFS;
}
memcpy(buf, handle->path, required_len);
*len = required_len;
return 0;
}
void uv__fs_readdir_cleanup(uv_fs_t* req) {
uv__dirent_t** dents;
dents = req->ptr;
if (req->nbufs > 0 && req->nbufs != (unsigned int) req->result)
req->nbufs--;
for (; req->nbufs < (unsigned int) req->result; req->nbufs++)
free(dents[req->nbufs]);
}
int uv_fs_readdir_next(uv_fs_t* req, uv_dirent_t* ent) {
uv__dirent_t** dents;
uv__dirent_t* dent;
dents = req->ptr;
/* Free previous entity */
if (req->nbufs > 0)
free(dents[req->nbufs - 1]);
/* End was already reached */
if (req->nbufs == (unsigned int) req->result) {
free(dents);
req->ptr = NULL;
return UV_EOF;
}
dent = dents[req->nbufs++];
ent->name = dent->d_name;
if (dent->d_type == UV__DT_DIR)
ent->type = UV_DIRENT_DIR;
else
ent->type = UV_DIRENT_FILE;
return 0;
}

View File

@ -0,0 +1,206 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* This file is private to libuv. It provides common functionality to both
* Windows and Unix backends.
*/
#ifndef UV_COMMON_H_
#define UV_COMMON_H_
#include <assert.h>
#include <stddef.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv.h"
#include "tree.h"
#include "queue.h"
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define container_of(ptr, type, member) \
((type *) ((char *) (ptr) - offsetof(type, member)))
#ifndef _WIN32
enum {
UV__HANDLE_INTERNAL = 0x8000,
UV__HANDLE_ACTIVE = 0x4000,
UV__HANDLE_REF = 0x2000,
UV__HANDLE_CLOSING = 0 /* no-op on unix */
};
#else
# define UV__HANDLE_INTERNAL 0x80
# define UV__HANDLE_ACTIVE 0x40
# define UV__HANDLE_REF 0x20
# define UV__HANDLE_CLOSING 0x01
#endif
int uv__tcp_bind(uv_tcp_t* tcp,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags);
int uv__tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
uv_connect_cb cb);
int uv__udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags);
int uv__udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
unsigned int addrlen,
uv_udp_send_cb send_cb);
int uv__udp_try_send(uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
unsigned int addrlen);
int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb,
uv_udp_recv_cb recv_cb);
int uv__udp_recv_stop(uv_udp_t* handle);
void uv__fs_poll_close(uv_fs_poll_t* handle);
int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. */
void uv__work_submit(uv_loop_t* loop,
struct uv__work *w,
void (*work)(struct uv__work *w),
void (*done)(struct uv__work *w, int status));
void uv__work_done(uv_async_t* handle);
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
void uv__fs_readdir_cleanup(uv_fs_t* req);
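/* The handle bookkeeping macros below keep the ACTIVE/REF flags and the
 * loop's active_handles counter consistent: a handle contributes to the
 * counter (and therefore keeps the loop alive) only while it is both
 * active and referenced.
 */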
#define uv__has_active_reqs(loop) \
(QUEUE_EMPTY(&(loop)->active_reqs) == 0)
#define uv__req_register(loop, req) \
do { \
QUEUE_INSERT_TAIL(&(loop)->active_reqs, &(req)->active_queue); \
} \
while (0)
#define uv__req_unregister(loop, req) \
do { \
assert(uv__has_active_reqs(loop)); \
QUEUE_REMOVE(&(req)->active_queue); \
} \
while (0)
#define uv__has_active_handles(loop) \
((loop)->active_handles > 0)
#define uv__active_handle_add(h) \
do { \
(h)->loop->active_handles++; \
} \
while (0)
#define uv__active_handle_rm(h) \
do { \
(h)->loop->active_handles--; \
} \
while (0)
#define uv__is_active(h) \
(((h)->flags & UV__HANDLE_ACTIVE) != 0)
#define uv__is_closing(h) \
(((h)->flags & (UV_CLOSING | UV_CLOSED)) != 0)
#define uv__handle_start(h) \
do { \
assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \
if (((h)->flags & UV__HANDLE_ACTIVE) != 0) break; \
(h)->flags |= UV__HANDLE_ACTIVE; \
if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_add(h); \
} \
while (0)
#define uv__handle_stop(h) \
do { \
assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \
if (((h)->flags & UV__HANDLE_ACTIVE) == 0) break; \
(h)->flags &= ~UV__HANDLE_ACTIVE; \
if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_rm(h); \
} \
while (0)
#define uv__handle_ref(h) \
do { \
if (((h)->flags & UV__HANDLE_REF) != 0) break; \
(h)->flags |= UV__HANDLE_REF; \
if (((h)->flags & UV__HANDLE_CLOSING) != 0) break; \
if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_add(h); \
} \
while (0)
#define uv__handle_unref(h) \
do { \
if (((h)->flags & UV__HANDLE_REF) == 0) break; \
(h)->flags &= ~UV__HANDLE_REF; \
if (((h)->flags & UV__HANDLE_CLOSING) != 0) break; \
if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_rm(h); \
} \
while (0)
#define uv__has_ref(h) \
(((h)->flags & UV__HANDLE_REF) != 0)
#if defined(_WIN32)
# define uv__handle_platform_init(h)
#else
# define uv__handle_platform_init(h) ((h)->next_closing = NULL)
#endif
#define uv__handle_init(loop_, h, type_) \
do { \
(h)->loop = (loop_); \
(h)->type = (type_); \
(h)->flags = UV__HANDLE_REF; /* Ref the loop when active. */ \
QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue); \
uv__handle_platform_init(h); \
} \
while (0)
#endif /* UV_COMMON_H_ */

View File

@ -0,0 +1,49 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#define UV_VERSION ((UV_VERSION_MAJOR << 16) | \
(UV_VERSION_MINOR << 8) | \
(UV_VERSION_PATCH))
#define UV_STRINGIFY(v) UV_STRINGIFY_HELPER(v)
#define UV_STRINGIFY_HELPER(v) #v
#define UV_VERSION_STRING_BASE UV_STRINGIFY(UV_VERSION_MAJOR) "." \
UV_STRINGIFY(UV_VERSION_MINOR) "." \
UV_STRINGIFY(UV_VERSION_PATCH)
#if UV_VERSION_IS_RELEASE
# define UV_VERSION_STRING UV_VERSION_STRING_BASE
#else
# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-pre"
#endif
unsigned int uv_version(void) {
return UV_VERSION;
}
const char* uv_version_string(void) {
return UV_VERSION_STRING;
}

View File

@ -0,0 +1,99 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "atomicops-inl.h"
#include "handle-inl.h"
#include "req-inl.h"
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) {
if (handle->flags & UV__HANDLE_CLOSING &&
!handle->async_sent) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
}
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
uv_req_t* req;
uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC);
handle->async_sent = 0;
handle->async_cb = async_cb;
req = &handle->async_req;
uv_req_init(loop, req);
req->type = UV_WAKEUP;
req->data = handle;
uv__handle_start(handle);
return 0;
}
void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
if (!((uv_async_t*)handle)->async_sent) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
uv__handle_closing(handle);
}
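/* Wake up the event loop from another thread. The async_sent flag is set
 * with an atomic test-and-set so that at most one wakeup packet is posted
 * to the completion port, no matter how many threads call uv_async_send()
 * before the loop processes it.
 */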
int uv_async_send(uv_async_t* handle) {
uv_loop_t* loop = handle->loop;
if (handle->type != UV_ASYNC) {
/* Can't set errno because that's not thread-safe. */
return -1;
}
/* The user should make sure never to call uv_async_send to a closing */
/* or closed handle. */
assert(!(handle->flags & UV__HANDLE_CLOSING));
if (!uv__atomic_exchange_set(&handle->async_sent)) {
POST_COMPLETION_FOR_REQ(loop, &handle->async_req);
}
return 0;
}
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
handle->async_sent = 0;
if (handle->flags & UV__HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*)handle);
} else if (handle->async_cb != NULL) {
handle->async_cb(handle);
}
}

View File

@ -0,0 +1,56 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_ATOMICOPS_INL_H_
#define UV_WIN_ATOMICOPS_INL_H_
#include "uv.h"
/* Atomic set operation on char */
#ifdef _MSC_VER /* MSVC */
/* _InterlockedOr8 is supported by MSVC on x32 and x64. It is slightly less */
/* efficient than InterlockedExchange, but InterlockedExchange8 does not */
/* exist, and interlocked operations on larger targets might require the */
/* target to be aligned. */
#pragma intrinsic(_InterlockedOr8)
static char __declspec(inline) uv__atomic_exchange_set(char volatile* target) {
return _InterlockedOr8(target, 1);
}
#else /* GCC */
/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
static inline char uv__atomic_exchange_set(char volatile* target) {
const char one = 1;
char old_value;
__asm__ __volatile__ ("lock xchgb %0, %1\n\t"
: "=r"(old_value), "=m"(*target)
: "0"(one), "m"(*target)
: "memory");
return old_value;
}
#endif
#endif /* UV_WIN_ATOMICOPS_INL_H_ */

View File

@ -0,0 +1,458 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)
#include <crtdbg.h>
#endif
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* The only event loop we support right now */
static uv_loop_t uv_default_loop_;
/* uv_once initialization guards */
static uv_once_t uv_init_guard_ = UV_ONCE_INIT;
static uv_once_t uv_default_loop_init_guard_ = UV_ONCE_INIT;
#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
/* Our crt debug report handler allows us to temporarily disable asserts
* just for the current thread.
*/
UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE;
static int uv__crt_dbg_report_handler(int report_type, char *message, int *ret_val) {
if (uv__crt_assert_enabled || report_type != _CRT_ASSERT)
return FALSE;
if (ret_val) {
/* Set ret_val to 0 to continue with normal execution.
* Set ret_val to 1 to trigger a breakpoint.
*/
if(IsDebuggerPresent())
*ret_val = 1;
else
*ret_val = 0;
}
/* Don't call _CrtDbgReport. */
return TRUE;
}
#else
UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE;
#endif
#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
const wchar_t* function, const wchar_t * file, unsigned int line,
uintptr_t reserved) {
/* No-op. */
}
#endif
static void uv_init(void) {
/* Tell Windows that we will handle critical errors. */
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
SEM_NOOPENFILEERRORBOX);
/* Tell the CRT to not exit the application when an invalid parameter is
* passed. The main issue is that invalid FDs will trigger this behavior.
*/
#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
_set_invalid_parameter_handler(uv__crt_invalid_parameter_handler);
#endif
/* We also need to setup our debug report handler because some CRT
 * functions (e.g. _get_osfhandle) raise an assert when called with invalid
* FDs even though they return the proper error code in the release build.
*/
#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
_CrtSetReportHook(uv__crt_dbg_report_handler);
#endif
/* Fetch winapi function pointers. This must be done first because other
 * initialization code might need these function pointers to be loaded.
*/
uv_winapi_init();
/* Initialize winsock */
uv_winsock_init();
/* Initialize FS */
uv_fs_init();
/* Initialize signal stuff */
uv_signals_init();
/* Initialize console */
uv_console_init();
/* Initialize utilities */
uv__util_init();
}
int uv_loop_init(uv_loop_t* loop) {
/* Initialize libuv itself first */
uv__once_init();
/* Create an I/O completion port */
loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
if (loop->iocp == NULL)
return uv_translate_sys_error(GetLastError());
  /* To prevent uninitialized memory access, loop->time must be initialized
* to zero before calling uv_update_time for the first time.
*/
loop->time = 0;
loop->last_tick_count = 0;
uv_update_time(loop);
QUEUE_INIT(&loop->wq);
QUEUE_INIT(&loop->handle_queue);
QUEUE_INIT(&loop->active_reqs);
loop->active_handles = 0;
loop->pending_reqs_tail = NULL;
loop->endgame_handles = NULL;
RB_INIT(&loop->timers);
loop->check_handles = NULL;
loop->prepare_handles = NULL;
loop->idle_handles = NULL;
loop->next_prepare_handle = NULL;
loop->next_check_handle = NULL;
loop->next_idle_handle = NULL;
memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);
loop->active_tcp_streams = 0;
loop->active_udp_streams = 0;
loop->timer_counter = 0;
loop->stop_flag = 0;
if (uv_mutex_init(&loop->wq_mutex))
abort();
if (uv_async_init(loop, &loop->wq_async, uv__work_done))
abort();
uv__handle_unref(&loop->wq_async);
loop->wq_async.flags |= UV__HANDLE_INTERNAL;
return 0;
}
static void uv_default_loop_init(void) {
/* Initialize libuv itself first */
uv__once_init();
/* Initialize the main loop */
uv_loop_init(&uv_default_loop_);
}
void uv__once_init(void) {
uv_once(&uv_init_guard_, uv_init);
}
uv_loop_t* uv_default_loop(void) {
uv_once(&uv_default_loop_init_guard_, uv_default_loop_init);
return &uv_default_loop_;
}
static void uv__loop_close(uv_loop_t* loop) {
  /* Close the async handle without needing an extra loop iteration. */
assert(!loop->wq_async.async_sent);
loop->wq_async.close_cb = NULL;
uv__handle_closing(&loop->wq_async);
uv__handle_close(&loop->wq_async);
if (loop != &uv_default_loop_) {
size_t i;
for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) {
SOCKET sock = loop->poll_peer_sockets[i];
if (sock != 0 && sock != INVALID_SOCKET)
closesocket(sock);
}
}
  /* TODO: clean up the default loop. */
uv_mutex_lock(&loop->wq_mutex);
assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
assert(!uv__has_active_reqs(loop));
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
}
int uv_loop_close(uv_loop_t* loop) {
QUEUE* q;
uv_handle_t* h;
if (!QUEUE_EMPTY(&(loop)->active_reqs))
return UV_EBUSY;
QUEUE_FOREACH(q, &loop->handle_queue) {
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
if (!(h->flags & UV__HANDLE_INTERNAL))
return UV_EBUSY;
}
uv__loop_close(loop);
#ifndef NDEBUG
memset(loop, -1, sizeof(*loop));
#endif
return 0;
}
uv_loop_t* uv_loop_new(void) {
uv_loop_t* loop;
loop = (uv_loop_t*)malloc(sizeof(uv_loop_t));
if (loop == NULL) {
return NULL;
}
if (uv_loop_init(loop)) {
free(loop);
return NULL;
}
return loop;
}
void uv_loop_delete(uv_loop_t* loop) {
int err = uv_loop_close(loop);
assert(err == 0);
if (loop != &uv_default_loop_)
free(loop);
}
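/* Illustrative usage sketch, not part of the original source: a caller-owned
 * loop is typically created, run and torn down like this (handle setup and
 * error handling elided):
 *
 *   uv_loop_t* loop = uv_loop_new();
 *   ... initialize handles on `loop` ...
 *   uv_run(loop, UV_RUN_DEFAULT);
 *   uv_loop_delete(loop);
 */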
int uv_backend_fd(const uv_loop_t* loop) {
return -1;
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (loop->pending_reqs_tail)
return 0;
if (loop->endgame_handles)
return 0;
if (loop->idle_handles)
return 0;
return uv__next_timeout(loop);
}
static void uv_poll(uv_loop_t* loop, DWORD timeout) {
DWORD bytes;
ULONG_PTR key;
OVERLAPPED* overlapped;
uv_req_t* req;
GetQueuedCompletionStatus(loop->iocp,
&bytes,
&key,
&overlapped,
timeout);
if (overlapped) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlapped);
uv_insert_pending_req(loop, req);
} else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
} else {
/* We're sure that at least `timeout` milliseconds have expired, but
* this may not be reflected yet in the GetTickCount() return value.
* Therefore we ensure it's taken into account here.
*/
uv__time_forward(loop, timeout);
}
}
static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) {
BOOL success;
uv_req_t* req;
OVERLAPPED_ENTRY overlappeds[128];
ULONG count;
ULONG i;
success = pGetQueuedCompletionStatusEx(loop->iocp,
overlappeds,
ARRAY_SIZE(overlappeds),
&count,
timeout,
FALSE);
if (success) {
for (i = 0; i < count; i++) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
uv_insert_pending_req(loop, req);
}
} else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
} else if (timeout > 0) {
/* We're sure that at least `timeout` milliseconds have expired, but
* this may not be reflected yet in the GetTickCount() return value.
* Therefore we ensure it's taken into account here.
*/
uv__time_forward(loop, timeout);
}
}
static int uv__loop_alive(const uv_loop_t* loop) {
return loop->active_handles > 0 ||
!QUEUE_EMPTY(&loop->active_reqs) ||
loop->endgame_handles != NULL;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
int uv_run(uv_loop_t *loop, uv_run_mode mode) {
DWORD timeout;
int r;
void (*poll)(uv_loop_t* loop, DWORD timeout);
if (pGetQueuedCompletionStatusEx)
poll = &uv_poll_ex;
else
poll = &uv_poll;
r = uv__loop_alive(loop);
if (!r)
uv_update_time(loop);
while (r != 0 && loop->stop_flag == 0) {
uv_update_time(loop);
uv_process_timers(loop);
uv_process_reqs(loop);
uv_idle_invoke(loop);
uv_prepare_invoke(loop);
timeout = 0;
if ((mode & UV_RUN_NOWAIT) == 0)
timeout = uv_backend_timeout(loop);
(*poll)(loop, timeout);
uv_check_invoke(loop);
uv_process_endgames(loop);
if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
* been invoked when it returns. uv__io_poll() can return without doing
* I/O (meaning: no callbacks) when its timeout expires - which means we
* have pending timers that satisfy the forward progress constraint.
*
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv_update_time(loop);
uv_process_timers(loop);
}
r = uv__loop_alive(loop);
if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
break;
}
/* The if statement lets the compiler compile it to a conditional store.
* Avoids dirtying a cache line.
*/
if (loop->stop_flag != 0)
loop->stop_flag = 0;
return r;
}
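/* Illustrative sketch of the three run modes, not part of the original source
 * (r and do_other_work() are made-up names):
 *
 *   uv_run(loop, UV_RUN_DEFAULT);        // run until no active handles/reqs
 *
 *   r = uv_run(loop, UV_RUN_ONCE);       // one iteration; may block in poll,
 *                                        // at least one callback if alive
 *
 *   while (uv_run(loop, UV_RUN_NOWAIT))  // one non-blocking iteration
 *     do_other_work();
 */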
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
int r;
int len;
SOCKET socket;
if (handle == NULL || value == NULL)
return UV_EINVAL;
if (handle->type == UV_TCP)
socket = ((uv_tcp_t*) handle)->socket;
else if (handle->type == UV_UDP)
socket = ((uv_udp_t*) handle)->socket;
else
return UV_ENOTSUP;
len = sizeof(*value);
if (*value == 0)
r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len);
else
r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len);
if (r == SOCKET_ERROR)
return uv_translate_sys_error(WSAGetLastError());
return 0;
}


@ -0,0 +1,86 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
static int uv__dlerror(uv_lib_t* lib, int errorno);
int uv_dlopen(const char* filename, uv_lib_t* lib) {
WCHAR filename_w[32768];
lib->handle = NULL;
lib->errmsg = NULL;
if (!uv_utf8_to_utf16(filename, filename_w, ARRAY_SIZE(filename_w))) {
return uv__dlerror(lib, GetLastError());
}
lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
if (lib->handle == NULL) {
return uv__dlerror(lib, GetLastError());
}
return 0;
}
void uv_dlclose(uv_lib_t* lib) {
if (lib->errmsg) {
LocalFree((void*)lib->errmsg);
lib->errmsg = NULL;
}
if (lib->handle) {
/* Ignore errors. No good way to signal them without leaking memory. */
FreeLibrary(lib->handle);
lib->handle = NULL;
}
}
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
*ptr = (void*) GetProcAddress(lib->handle, name);
return uv__dlerror(lib, *ptr ? 0 : GetLastError());
}
const char* uv_dlerror(const uv_lib_t* lib) {
return lib->errmsg ? lib->errmsg : "no error";
}
static int uv__dlerror(uv_lib_t* lib, int errorno) {
if (lib->errmsg) {
LocalFree((void*)lib->errmsg);
lib->errmsg = NULL;
}
if (errorno) {
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
(LPSTR)&lib->errmsg, 0, NULL);
}
return errorno ? -1 : 0;
}
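/* Illustrative usage sketch, not part of the original source; "plugin.dll" and
 * "plugin_init" are made-up names:
 *
 *   uv_lib_t lib;
 *   void (*init)(void);
 *
 *   if (uv_dlopen("plugin.dll", &lib) == 0 &&
 *       uv_dlsym(&lib, "plugin_init", (void**) &init) == 0) {
 *     init();
 *   } else {
 *     fprintf(stderr, "%s\n", uv_dlerror(&lib));
 *   }
 *   uv_dlclose(&lib);
 */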


@ -0,0 +1,170 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "uv.h"
#include "internal.h"
/*
* Display an error message and abort the event loop.
*/
void uv_fatal_error(const int errorno, const char* syscall) {
char* buf = NULL;
const char* errmsg;
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
if (buf) {
errmsg = buf;
} else {
errmsg = "Unknown error";
}
/* FormatMessage messages include a newline character already, */
/* so don't add another. */
if (syscall) {
fprintf(stderr, "%s: (%d) %s", syscall, errorno, errmsg);
} else {
fprintf(stderr, "(%d) %s", errorno, errmsg);
}
if (buf) {
LocalFree(buf);
}
*((char*)NULL) = 0xff; /* Force debug break */
abort();
}
int uv_translate_sys_error(int sys_errno) {
if (sys_errno <= 0) {
return sys_errno; /* If < 0 then it's already a libuv error. */
}
switch (sys_errno) {
case ERROR_NOACCESS: return UV_EACCES;
case WSAEACCES: return UV_EACCES;
case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
case WSAEADDRINUSE: return UV_EADDRINUSE;
case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
case WSAEAFNOSUPPORT: return UV_EAFNOSUPPORT;
case WSAEWOULDBLOCK: return UV_EAGAIN;
case WSAEALREADY: return UV_EALREADY;
case ERROR_INVALID_FLAGS: return UV_EBADF;
case ERROR_INVALID_HANDLE: return UV_EBADF;
case ERROR_LOCK_VIOLATION: return UV_EBUSY;
case ERROR_PIPE_BUSY: return UV_EBUSY;
case ERROR_SHARING_VIOLATION: return UV_EBUSY;
case ERROR_OPERATION_ABORTED: return UV_ECANCELED;
case WSAEINTR: return UV_ECANCELED;
case ERROR_NO_UNICODE_TRANSLATION: return UV_ECHARSET;
case ERROR_CONNECTION_ABORTED: return UV_ECONNABORTED;
case WSAECONNABORTED: return UV_ECONNABORTED;
case ERROR_CONNECTION_REFUSED: return UV_ECONNREFUSED;
case WSAECONNREFUSED: return UV_ECONNREFUSED;
case ERROR_NETNAME_DELETED: return UV_ECONNRESET;
case WSAECONNRESET: return UV_ECONNRESET;
case ERROR_ALREADY_EXISTS: return UV_EEXIST;
case ERROR_FILE_EXISTS: return UV_EEXIST;
case ERROR_BUFFER_OVERFLOW: return UV_EFAULT;
case WSAEFAULT: return UV_EFAULT;
case ERROR_HOST_UNREACHABLE: return UV_EHOSTUNREACH;
case WSAEHOSTUNREACH: return UV_EHOSTUNREACH;
case ERROR_INSUFFICIENT_BUFFER: return UV_EINVAL;
case ERROR_INVALID_DATA: return UV_EINVAL;
case ERROR_INVALID_PARAMETER: return UV_EINVAL;
case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL;
case WSAEINVAL: return UV_EINVAL;
case WSAEPFNOSUPPORT: return UV_EINVAL;
case WSAESOCKTNOSUPPORT: return UV_EINVAL;
case ERROR_BEGINNING_OF_MEDIA: return UV_EIO;
case ERROR_BUS_RESET: return UV_EIO;
case ERROR_CRC: return UV_EIO;
case ERROR_DEVICE_DOOR_OPEN: return UV_EIO;
case ERROR_DEVICE_REQUIRES_CLEANING: return UV_EIO;
case ERROR_DISK_CORRUPT: return UV_EIO;
case ERROR_EOM_OVERFLOW: return UV_EIO;
case ERROR_FILEMARK_DETECTED: return UV_EIO;
case ERROR_GEN_FAILURE: return UV_EIO;
case ERROR_INVALID_BLOCK_LENGTH: return UV_EIO;
case ERROR_IO_DEVICE: return UV_EIO;
case ERROR_NO_DATA_DETECTED: return UV_EIO;
case ERROR_NO_SIGNAL_SENT: return UV_EIO;
case ERROR_OPEN_FAILED: return UV_EIO;
case ERROR_SETMARK_DETECTED: return UV_EIO;
case ERROR_SIGNAL_REFUSED: return UV_EIO;
case WSAEISCONN: return UV_EISCONN;
case ERROR_CANT_RESOLVE_FILENAME: return UV_ELOOP;
case ERROR_TOO_MANY_OPEN_FILES: return UV_EMFILE;
case WSAEMFILE: return UV_EMFILE;
case WSAEMSGSIZE: return UV_EMSGSIZE;
case ERROR_FILENAME_EXCED_RANGE: return UV_ENAMETOOLONG;
case ERROR_NETWORK_UNREACHABLE: return UV_ENETUNREACH;
case WSAENETUNREACH: return UV_ENETUNREACH;
case WSAENOBUFS: return UV_ENOBUFS;
case ERROR_DIRECTORY: return UV_ENOENT;
case ERROR_FILE_NOT_FOUND: return UV_ENOENT;
case ERROR_INVALID_NAME: return UV_ENOENT;
case ERROR_INVALID_DRIVE: return UV_ENOENT;
case ERROR_INVALID_REPARSE_DATA: return UV_ENOENT;
case ERROR_MOD_NOT_FOUND: return UV_ENOENT;
case ERROR_PATH_NOT_FOUND: return UV_ENOENT;
case WSAHOST_NOT_FOUND: return UV_ENOENT;
case WSANO_DATA: return UV_ENOENT;
case ERROR_NOT_ENOUGH_MEMORY: return UV_ENOMEM;
case ERROR_OUTOFMEMORY: return UV_ENOMEM;
case ERROR_CANNOT_MAKE: return UV_ENOSPC;
case ERROR_DISK_FULL: return UV_ENOSPC;
case ERROR_EA_TABLE_FULL: return UV_ENOSPC;
case ERROR_END_OF_MEDIA: return UV_ENOSPC;
case ERROR_HANDLE_DISK_FULL: return UV_ENOSPC;
case ERROR_NOT_CONNECTED: return UV_ENOTCONN;
case WSAENOTCONN: return UV_ENOTCONN;
case ERROR_DIR_NOT_EMPTY: return UV_ENOTEMPTY;
case WSAENOTSOCK: return UV_ENOTSOCK;
case ERROR_NOT_SUPPORTED: return UV_ENOTSUP;
case ERROR_BROKEN_PIPE: return UV_EOF;
case ERROR_ACCESS_DENIED: return UV_EPERM;
case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM;
case ERROR_BAD_PIPE: return UV_EPIPE;
case ERROR_NO_DATA: return UV_EPIPE;
case ERROR_PIPE_NOT_CONNECTED: return UV_EPIPE;
case WSAESHUTDOWN: return UV_EPIPE;
case WSAEPROTONOSUPPORT: return UV_EPROTONOSUPPORT;
case ERROR_WRITE_PROTECT: return UV_EROFS;
case ERROR_SEM_TIMEOUT: return UV_ETIMEDOUT;
case WSAETIMEDOUT: return UV_ETIMEDOUT;
case ERROR_NOT_SAME_DEVICE: return UV_EXDEV;
case ERROR_INVALID_FUNCTION: return UV_EISDIR;
case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG;
default: return UV_UNKNOWN;
}
}
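/* Illustrative sketch, not part of the original source: callers typically
 * convert a Win32/Winsock error into libuv's portable error space and format
 * it with uv_strerror(), e.g.
 *
 *   int err = uv_translate_sys_error(GetLastError());
 *   if (err != 0)
 *     fprintf(stderr, "operation failed: %s\n", uv_strerror(err));
 */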


@ -0,0 +1,527 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <malloc.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
const unsigned int uv_directory_watcher_buffer_size = 4096;
static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
uv_fs_event_t* handle) {
assert(handle->dir_handle != INVALID_HANDLE_VALUE);
assert(!handle->req_pending);
memset(&(handle->req.overlapped), 0, sizeof(handle->req.overlapped));
if (!ReadDirectoryChangesW(handle->dir_handle,
handle->buffer,
uv_directory_watcher_buffer_size,
FALSE,
FILE_NOTIFY_CHANGE_FILE_NAME |
FILE_NOTIFY_CHANGE_DIR_NAME |
FILE_NOTIFY_CHANGE_ATTRIBUTES |
FILE_NOTIFY_CHANGE_SIZE |
FILE_NOTIFY_CHANGE_LAST_WRITE |
FILE_NOTIFY_CHANGE_LAST_ACCESS |
FILE_NOTIFY_CHANGE_CREATION |
FILE_NOTIFY_CHANGE_SECURITY,
NULL,
&handle->req.overlapped,
NULL)) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(&handle->req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)&handle->req);
}
handle->req_pending = 1;
}
static int uv_split_path(const WCHAR* filename, WCHAR** dir,
WCHAR** file) {
int len = wcslen(filename);
int i = len;
while (i > 0 && filename[--i] != '\\' && filename[i] != '/');
if (i == 0) {
if (dir) {
*dir = (WCHAR*)malloc((MAX_PATH + 1) * sizeof(WCHAR));
if (!*dir) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
if (!GetCurrentDirectoryW(MAX_PATH, *dir)) {
free(*dir);
*dir = NULL;
return -1;
}
}
*file = wcsdup(filename);
} else {
if (dir) {
*dir = (WCHAR*)malloc((i + 1) * sizeof(WCHAR));
if (!*dir) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
wcsncpy(*dir, filename, i);
(*dir)[i] = L'\0';
}
*file = (WCHAR*)malloc((len - i) * sizeof(WCHAR));
if (!*file) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
wcsncpy(*file, filename + i + 1, len - i - 1);
(*file)[len - i - 1] = L'\0';
}
return 0;
}
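/* For example (illustration, not part of the original source):
 *
 *   uv_split_path(L"C:\\temp\\file.txt", &dir, &file)
 *     leaves dir = L"C:\\temp" and file = L"file.txt";
 *   uv_split_path(L"file.txt", &dir, &file)
 *     leaves dir = the current directory and file = L"file.txt".
 */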
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_FS_EVENT);
handle->dir_handle = INVALID_HANDLE_VALUE;
handle->buffer = NULL;
handle->req_pending = 0;
handle->filew = NULL;
handle->short_filew = NULL;
handle->dirw = NULL;
uv_req_init(loop, (uv_req_t*)&handle->req);
handle->req.type = UV_FS_EVENT_REQ;
handle->req.data = handle;
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* path,
unsigned int flags) {
int name_size, is_path_dir;
DWORD attr, last_error;
WCHAR* dir = NULL, *dir_to_watch, *pathw = NULL;
WCHAR short_path[MAX_PATH];
if (uv__is_active(handle))
return UV_EINVAL;
handle->cb = cb;
handle->path = strdup(path);
if (!handle->path) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
uv__handle_start(handle);
/* Convert name to UTF16. */
name_size = uv_utf8_to_utf16(path, NULL, 0) * sizeof(WCHAR);
pathw = (WCHAR*)malloc(name_size);
if (!pathw) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
if (!uv_utf8_to_utf16(path, pathw,
name_size / sizeof(WCHAR))) {
return uv_translate_sys_error(GetLastError());
}
/* Determine whether path is a file or a directory. */
attr = GetFileAttributesW(pathw);
if (attr == INVALID_FILE_ATTRIBUTES) {
last_error = GetLastError();
goto error;
}
is_path_dir = (attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0;
if (is_path_dir) {
/* path is a directory, so that's the directory that we will watch. */
handle->dirw = pathw;
dir_to_watch = pathw;
} else {
/*
     * path is a file, so we split it into its directory and file parts and
     * watch that directory.
*/
/* Convert to short path. */
if (!GetShortPathNameW(pathw, short_path, ARRAY_SIZE(short_path))) {
last_error = GetLastError();
goto error;
}
if (uv_split_path(pathw, &dir, &handle->filew) != 0) {
last_error = GetLastError();
goto error;
}
if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) {
last_error = GetLastError();
goto error;
}
dir_to_watch = dir;
free(pathw);
pathw = NULL;
}
handle->dir_handle = CreateFileW(dir_to_watch,
FILE_LIST_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_DELETE |
FILE_SHARE_WRITE,
NULL,
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS |
FILE_FLAG_OVERLAPPED,
NULL);
if (dir) {
free(dir);
dir = NULL;
}
if (handle->dir_handle == INVALID_HANDLE_VALUE) {
last_error = GetLastError();
goto error;
}
if (CreateIoCompletionPort(handle->dir_handle,
handle->loop->iocp,
(ULONG_PTR)handle,
0) == NULL) {
last_error = GetLastError();
goto error;
}
if (!handle->buffer) {
handle->buffer = (char*)_aligned_malloc(uv_directory_watcher_buffer_size,
sizeof(DWORD));
}
if (!handle->buffer) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
memset(&(handle->req.overlapped), 0, sizeof(handle->req.overlapped));
if (!ReadDirectoryChangesW(handle->dir_handle,
handle->buffer,
uv_directory_watcher_buffer_size,
FALSE,
FILE_NOTIFY_CHANGE_FILE_NAME |
FILE_NOTIFY_CHANGE_DIR_NAME |
FILE_NOTIFY_CHANGE_ATTRIBUTES |
FILE_NOTIFY_CHANGE_SIZE |
FILE_NOTIFY_CHANGE_LAST_WRITE |
FILE_NOTIFY_CHANGE_LAST_ACCESS |
FILE_NOTIFY_CHANGE_CREATION |
FILE_NOTIFY_CHANGE_SECURITY,
NULL,
&handle->req.overlapped,
NULL)) {
last_error = GetLastError();
goto error;
}
handle->req_pending = 1;
return 0;
error:
if (handle->path) {
free(handle->path);
handle->path = NULL;
}
if (handle->filew) {
free(handle->filew);
handle->filew = NULL;
}
if (handle->short_filew) {
free(handle->short_filew);
handle->short_filew = NULL;
}
free(pathw);
if (handle->dir_handle != INVALID_HANDLE_VALUE) {
CloseHandle(handle->dir_handle);
handle->dir_handle = INVALID_HANDLE_VALUE;
}
if (handle->buffer) {
_aligned_free(handle->buffer);
handle->buffer = NULL;
}
return uv_translate_sys_error(last_error);
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
if (!uv__is_active(handle))
return 0;
if (handle->dir_handle != INVALID_HANDLE_VALUE) {
CloseHandle(handle->dir_handle);
handle->dir_handle = INVALID_HANDLE_VALUE;
}
uv__handle_stop(handle);
if (handle->filew) {
free(handle->filew);
handle->filew = NULL;
}
if (handle->short_filew) {
free(handle->short_filew);
handle->short_filew = NULL;
}
if (handle->path) {
free(handle->path);
handle->path = NULL;
}
if (handle->dirw) {
free(handle->dirw);
handle->dirw = NULL;
}
return 0;
}
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle) {
FILE_NOTIFY_INFORMATION* file_info;
int err, sizew, size, result;
char* filename = NULL;
WCHAR* filenamew, *long_filenamew = NULL;
DWORD offset = 0;
assert(req->type == UV_FS_EVENT_REQ);
assert(handle->req_pending);
handle->req_pending = 0;
/* Don't report any callbacks if:
* - We're closing, just push the handle onto the endgame queue
* - We are not active, just ignore the callback
*/
if (!uv__is_active(handle)) {
if (handle->flags & UV__HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
}
return;
}
file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset);
if (REQ_SUCCESS(req)) {
if (req->overlapped.InternalHigh > 0) {
do {
file_info = (FILE_NOTIFY_INFORMATION*)((char*)file_info + offset);
assert(!filename);
assert(!long_filenamew);
/*
* Fire the event only if we were asked to watch a directory,
* or if the filename filter matches.
*/
if (handle->dirw ||
_wcsnicmp(handle->filew, file_info->FileName,
file_info->FileNameLength / sizeof(WCHAR)) == 0 ||
_wcsnicmp(handle->short_filew, file_info->FileName,
file_info->FileNameLength / sizeof(WCHAR)) == 0) {
if (handle->dirw) {
/*
* We attempt to convert the file name to its long form for
* events that still point to valid files on disk.
* For removed and renamed events, we do not provide the file name.
*/
if (file_info->Action != FILE_ACTION_REMOVED &&
file_info->Action != FILE_ACTION_RENAMED_OLD_NAME) {
/* Construct a full path to the file. */
size = wcslen(handle->dirw) +
file_info->FileNameLength / sizeof(WCHAR) + 2;
filenamew = (WCHAR*)malloc(size * sizeof(WCHAR));
if (!filenamew) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
_snwprintf(filenamew, size, L"%s\\%.*s", handle->dirw,
file_info->FileNameLength / sizeof(WCHAR),
file_info->FileName);
filenamew[size - 1] = L'\0';
/* Convert to long name. */
size = GetLongPathNameW(filenamew, NULL, 0);
if (size) {
long_filenamew = (WCHAR*)malloc(size * sizeof(WCHAR));
if (!long_filenamew) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
size = GetLongPathNameW(filenamew, long_filenamew, size);
if (size) {
long_filenamew[size] = '\0';
} else {
free(long_filenamew);
long_filenamew = NULL;
}
}
free(filenamew);
if (long_filenamew) {
/* Get the file name out of the long path. */
result = uv_split_path(long_filenamew, NULL, &filenamew);
free(long_filenamew);
if (result == 0) {
long_filenamew = filenamew;
sizew = -1;
} else {
long_filenamew = NULL;
}
}
/*
* If we couldn't get the long name - just use the name
* provided by ReadDirectoryChangesW.
*/
if (!long_filenamew) {
filenamew = file_info->FileName;
sizew = file_info->FileNameLength / sizeof(WCHAR);
}
} else {
            /* Removed or renamed events don't provide a filename. */
filenamew = NULL;
}
} else {
/* We already have the long name of the file, so just use it. */
filenamew = handle->filew;
sizew = -1;
}
if (filenamew) {
/* Convert the filename to utf8. */
size = uv_utf16_to_utf8(filenamew,
sizew,
NULL,
0);
if (size) {
filename = (char*)malloc(size + 1);
if (!filename) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
size = uv_utf16_to_utf8(filenamew,
sizew,
filename,
size);
if (size) {
filename[size] = '\0';
} else {
free(filename);
filename = NULL;
}
}
}
switch (file_info->Action) {
case FILE_ACTION_ADDED:
case FILE_ACTION_REMOVED:
case FILE_ACTION_RENAMED_OLD_NAME:
case FILE_ACTION_RENAMED_NEW_NAME:
handle->cb(handle, filename, UV_RENAME, 0);
break;
case FILE_ACTION_MODIFIED:
handle->cb(handle, filename, UV_CHANGE, 0);
break;
}
free(filename);
filename = NULL;
free(long_filenamew);
long_filenamew = NULL;
}
offset = file_info->NextEntryOffset;
} while (offset && !(handle->flags & UV__HANDLE_CLOSING));
} else {
handle->cb(handle, NULL, UV_CHANGE, 0);
}
} else {
err = GET_REQ_ERROR(req);
handle->cb(handle, NULL, 0, uv_translate_sys_error(err));
}
if (!(handle->flags & UV__HANDLE_CLOSING)) {
uv_fs_event_queue_readdirchanges(loop, handle);
} else {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
uv__handle_closing(handle);
if (!handle->req_pending) {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
if ((handle->flags & UV__HANDLE_CLOSING) && !handle->req_pending) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
if (handle->buffer) {
_aligned_free(handle->buffer);
handle->buffer = NULL;
}
uv__handle_close(handle);
}
}
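/* Illustrative usage sketch, not part of the original source; on_fs_event and
 * the watched path are made-up names:
 *
 *   static void on_fs_event(uv_fs_event_t* handle,
 *                           const char* filename,
 *                           int events,
 *                           int status) {
 *     if (status == 0 && (events & UV_CHANGE))
 *       printf("%s changed\n", filename != NULL ? filename : "(unknown)");
 *   }
 *
 *   uv_fs_event_t watcher;
 *   uv_fs_event_init(uv_default_loop(), &watcher);
 *   uv_fs_event_start(&watcher, on_fs_event, "C:\\some\\dir", 0);
 *   uv_run(uv_default_loop(), UV_RUN_DEFAULT);
 */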

File diff suppressed because it is too large.

@ -0,0 +1,354 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <malloc.h>
#include "uv.h"
#include "internal.h"
#include "req-inl.h"
int uv__getaddrinfo_translate_error(int sys_err) {
switch (sys_err) {
case 0: return 0;
case WSATRY_AGAIN: return UV_EAI_AGAIN;
case WSAEINVAL: return UV_EAI_BADFLAGS;
case WSANO_RECOVERY: return UV_EAI_FAIL;
case WSAEAFNOSUPPORT: return UV_EAI_FAMILY;
case WSA_NOT_ENOUGH_MEMORY: return UV_EAI_MEMORY;
case WSAHOST_NOT_FOUND: return UV_EAI_NONAME;
case WSATYPE_NOT_FOUND: return UV_EAI_SERVICE;
case WSAESOCKTNOSUPPORT: return UV_EAI_SOCKTYPE;
default: return uv_translate_sys_error(sys_err);
}
}
/*
* MinGW is missing this
*/
#if !defined(_MSC_VER) && !defined(__MINGW64_VERSION_MAJOR)
typedef struct addrinfoW {
int ai_flags;
int ai_family;
int ai_socktype;
int ai_protocol;
size_t ai_addrlen;
WCHAR* ai_canonname;
struct sockaddr* ai_addr;
struct addrinfoW* ai_next;
} ADDRINFOW, *PADDRINFOW;
DECLSPEC_IMPORT int WSAAPI GetAddrInfoW(const WCHAR* node,
const WCHAR* service,
const ADDRINFOW* hints,
PADDRINFOW* result);
DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo);
#endif
/* Adjust size value to be a multiple of 4. Used to keep pointers aligned. */
/* Do we need different versions of this for different architectures? */
#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2)
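/* For example (illustration): ALIGNED_SIZE(1..4) == 4, ALIGNED_SIZE(5) == 8,
 * ALIGNED_SIZE(8) == 8.
 */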
static void uv__getaddrinfo_work(struct uv__work* w) {
uv_getaddrinfo_t* req;
int err;
req = container_of(w, uv_getaddrinfo_t, work_req);
err = GetAddrInfoW(req->node, req->service, req->hints, &req->res);
req->retcode = uv__getaddrinfo_translate_error(err);
}
/*
 * Called from uv_run when complete. Calls the user-specified callback, then
 * frees the returned addrinfo.
 * Returned addrinfo strings are converted from UTF-16 to UTF-8.
*
* To minimize allocation we calculate total size required,
* and copy all structs and referenced strings into the one block.
* Each size calculation is adjusted to avoid unaligned pointers.
*/
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
uv_getaddrinfo_t* req;
int addrinfo_len = 0;
int name_len = 0;
size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo));
struct addrinfoW* addrinfow_ptr;
struct addrinfo* addrinfo_ptr;
char* alloc_ptr = NULL;
char* cur_ptr = NULL;
req = container_of(w, uv_getaddrinfo_t, work_req);
/* release input parameter memory */
if (req->alloc != NULL) {
free(req->alloc);
req->alloc = NULL;
}
if (status == UV_ECANCELED) {
assert(req->retcode == 0);
req->retcode = UV_EAI_CANCELED;
if (req->res != NULL) {
FreeAddrInfoW(req->res);
req->res = NULL;
}
goto complete;
}
if (req->retcode == 0) {
/* convert addrinfoW to addrinfo */
/* first calculate required length */
addrinfow_ptr = req->res;
while (addrinfow_ptr != NULL) {
addrinfo_len += addrinfo_struct_len +
ALIGNED_SIZE(addrinfow_ptr->ai_addrlen);
if (addrinfow_ptr->ai_canonname != NULL) {
name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname, -1, NULL, 0);
if (name_len == 0) {
req->retcode = uv_translate_sys_error(GetLastError());
goto complete;
}
addrinfo_len += ALIGNED_SIZE(name_len);
}
addrinfow_ptr = addrinfow_ptr->ai_next;
}
/* allocate memory for addrinfo results */
alloc_ptr = (char*)malloc(addrinfo_len);
/* do conversions */
if (alloc_ptr != NULL) {
cur_ptr = alloc_ptr;
addrinfow_ptr = req->res;
while (addrinfow_ptr != NULL) {
/* copy addrinfo struct data */
assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len);
addrinfo_ptr = (struct addrinfo*)cur_ptr;
addrinfo_ptr->ai_family = addrinfow_ptr->ai_family;
addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype;
addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol;
addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags;
addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen;
addrinfo_ptr->ai_canonname = NULL;
addrinfo_ptr->ai_addr = NULL;
addrinfo_ptr->ai_next = NULL;
cur_ptr += addrinfo_struct_len;
/* copy sockaddr */
if (addrinfo_ptr->ai_addrlen > 0) {
assert(cur_ptr + addrinfo_ptr->ai_addrlen <=
alloc_ptr + addrinfo_len);
memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen);
addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr;
cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen);
}
/* convert canonical name to UTF-8 */
if (addrinfow_ptr->ai_canonname != NULL) {
name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname,
-1,
NULL,
0);
assert(name_len > 0);
assert(cur_ptr + name_len <= alloc_ptr + addrinfo_len);
name_len = uv_utf16_to_utf8(addrinfow_ptr->ai_canonname,
-1,
cur_ptr,
name_len);
assert(name_len > 0);
addrinfo_ptr->ai_canonname = cur_ptr;
cur_ptr += ALIGNED_SIZE(name_len);
}
assert(cur_ptr <= alloc_ptr + addrinfo_len);
/* set next ptr */
addrinfow_ptr = addrinfow_ptr->ai_next;
if (addrinfow_ptr != NULL) {
addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr;
}
}
} else {
req->retcode = UV_EAI_MEMORY;
}
}
/* return memory to system */
if (req->res != NULL) {
FreeAddrInfoW(req->res);
req->res = NULL;
}
complete:
uv__req_unregister(req->loop, req);
/* finally do callback with converted result */
req->getaddrinfo_cb(req, req->retcode, (struct addrinfo*)alloc_ptr);
}
void uv_freeaddrinfo(struct addrinfo* ai) {
char* alloc_ptr = (char*)ai;
/* release copied result memory */
if (alloc_ptr != NULL) {
free(alloc_ptr);
}
}
/*
 * Entry point for getaddrinfo.
 * We convert the UTF-8 strings to UTF-16 and save the UTF-16 string pointers
 * in the req. We also copy the hints so that the caller does not need to keep
 * the memory alive until the callback runs.
 * Returns 0 if a callback will be made.
 * Returns an error code if validation fails.
*
* To minimize allocation we calculate total size required,
* and copy all structs and referenced strings into the one block.
* Each size calculation is adjusted to avoid unaligned pointers.
*/
int uv_getaddrinfo(uv_loop_t* loop,
uv_getaddrinfo_t* req,
uv_getaddrinfo_cb getaddrinfo_cb,
const char* node,
const char* service,
const struct addrinfo* hints) {
int nodesize = 0;
int servicesize = 0;
int hintssize = 0;
char* alloc_ptr = NULL;
int err;
if (req == NULL || getaddrinfo_cb == NULL ||
(node == NULL && service == NULL)) {
err = WSAEINVAL;
goto error;
}
uv_req_init(loop, (uv_req_t*)req);
req->getaddrinfo_cb = getaddrinfo_cb;
req->res = NULL;
req->type = UV_GETADDRINFO;
req->loop = loop;
req->retcode = 0;
/* calculate required memory size for all input values */
if (node != NULL) {
nodesize = ALIGNED_SIZE(uv_utf8_to_utf16(node, NULL, 0) * sizeof(WCHAR));
if (nodesize == 0) {
err = GetLastError();
goto error;
}
}
if (service != NULL) {
servicesize = ALIGNED_SIZE(uv_utf8_to_utf16(service, NULL, 0) *
sizeof(WCHAR));
if (servicesize == 0) {
err = GetLastError();
goto error;
}
}
if (hints != NULL) {
hintssize = ALIGNED_SIZE(sizeof(struct addrinfoW));
}
/* allocate memory for inputs, and partition it as needed */
alloc_ptr = (char*)malloc(nodesize + servicesize + hintssize);
if (!alloc_ptr) {
err = WSAENOBUFS;
goto error;
}
/* save alloc_ptr now so we can free if error */
req->alloc = (void*)alloc_ptr;
/* convert node string to UTF16 into allocated memory and save pointer in */
  /* the request. */
if (node != NULL) {
req->node = (WCHAR*)alloc_ptr;
if (uv_utf8_to_utf16(node,
(WCHAR*) alloc_ptr,
nodesize / sizeof(WCHAR)) == 0) {
err = GetLastError();
goto error;
}
alloc_ptr += nodesize;
} else {
req->node = NULL;
}
/* convert service string to UTF16 into allocated memory and save pointer */
/* in the req. */
if (service != NULL) {
req->service = (WCHAR*)alloc_ptr;
if (uv_utf8_to_utf16(service,
(WCHAR*) alloc_ptr,
servicesize / sizeof(WCHAR)) == 0) {
err = GetLastError();
goto error;
}
alloc_ptr += servicesize;
} else {
req->service = NULL;
}
/* copy hints to allocated memory and save pointer in req */
if (hints != NULL) {
req->hints = (struct addrinfoW*)alloc_ptr;
req->hints->ai_family = hints->ai_family;
req->hints->ai_socktype = hints->ai_socktype;
req->hints->ai_protocol = hints->ai_protocol;
req->hints->ai_flags = hints->ai_flags;
req->hints->ai_addrlen = 0;
req->hints->ai_canonname = NULL;
req->hints->ai_addr = NULL;
req->hints->ai_next = NULL;
} else {
req->hints = NULL;
}
uv__work_submit(loop,
&req->work_req,
uv__getaddrinfo_work,
uv__getaddrinfo_done);
uv__req_register(loop, req);
return 0;
error:
if (req != NULL && req->alloc != NULL) {
free(req->alloc);
}
return uv_translate_sys_error(err);
}
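/* Illustrative usage sketch, not part of the original source; on_resolved is a
 * made-up name:
 *
 *   static void on_resolved(uv_getaddrinfo_t* req,
 *                           int status,
 *                           struct addrinfo* res) {
 *     if (status == 0) {
 *       ... connect using res->ai_addr ...
 *       uv_freeaddrinfo(res);
 *     }
 *   }
 *
 *   uv_getaddrinfo_t req;
 *   struct addrinfo hints;
 *   memset(&hints, 0, sizeof(hints));
 *   hints.ai_family = AF_INET;
 *   hints.ai_socktype = SOCK_STREAM;
 *   uv_getaddrinfo(uv_default_loop(), &req, on_resolved,
 *                  "localhost", "80", &hints);
 *   uv_run(uv_default_loop(), UV_RUN_DEFAULT);
 */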

Some files were not shown because too many files have changed in this diff.