diff --git a/ape/hood.hoon b/ape/hood.hoon index a14cf7d4ee..41e917231d 100644 --- a/ape/hood.hoon +++ b/ape/hood.hoon @@ -86,6 +86,7 @@ ++ peer-drum (wrap peer):from-drum ++ poke-dill-belt (wrap poke-dill-belt):from-drum ++ poke-drum-link (wrap poke-link):from-drum +++ poke-drum-unlink (wrap poke-unlink):from-drum ::++ poke-drum-exit (wrap poke-exit):from-drum ++ poke-drum-start (wrap poke-start):from-drum ++ poke-helm-hi (wrap poke-hi):from-helm diff --git a/arvo/ford.hoon b/arvo/ford.hoon index 83a1b75940..db1d094b73 100644 --- a/arvo/ford.hoon +++ b/arvo/ford.hoon @@ -1430,8 +1430,8 @@ ^- (bolt type) %+ (clef %slit) (fine cof gat sam) |= [cof=cafe gat=type sam=type] - %+ cool |.(>sam<) - %+ cool |.(>(~(peek ut gat) %free 6)<) + %+ cool |.(%.(%have ~(dunk ut sam))) + %+ cool |.(%.(%want ~(dunk ut (~(peek ut gat) %free 6)))) =+ top=(mule |.((slit gat sam))) ?- -.top | (flaw cof p.top) diff --git a/blog/hymn.hook b/blog/hymn.hook deleted file mode 100644 index 83c7922618..0000000000 --- a/blog/hymn.hook +++ /dev/null @@ -1,2 +0,0 @@ -/: /%%%/tree/pub/blog /% /hymn/ --< diff --git a/blog/json.hook b/blog/json.hook deleted file mode 100644 index 93678cb230..0000000000 --- a/blog/json.hook +++ /dev/null @@ -1,2 +0,0 @@ -/: /%%%/tree/pub/blog /% /json/ --< diff --git a/docs/hymn.hook b/docs/hymn.hook new file mode 100644 index 0000000000..c43ddb6824 --- /dev/null +++ b/docs/hymn.hook @@ -0,0 +1,2 @@ +/: /%%%/tree/pub/docs /% /hymn/ +-< diff --git a/docs/json.hook b/docs/json.hook new file mode 100644 index 0000000000..4e01a5ea6d --- /dev/null +++ b/docs/json.hook @@ -0,0 +1,2 @@ +/: /%%%/tree/pub/docs /% /json/ +-< diff --git a/front/hymn.hook b/front/hymn.hook new file mode 100644 index 0000000000..53e8dadc81 --- /dev/null +++ b/front/hymn.hook @@ -0,0 +1,2 @@ +/: /%%%/tree/pub/front /% /hymn/ +-< diff --git a/front/json.hook b/front/json.hook new file mode 100644 index 0000000000..5b110b21fe --- /dev/null +++ b/front/json.hook @@ -0,0 +1,2 @@ +/: /%%%/tree/pub/front /% /json/ +-< diff --git a/gen/hood/unlink.hoon b/gen/hood/unlink.hoon new file mode 100644 index 0000000000..033fa34d59 --- /dev/null +++ b/gen/hood/unlink.hoon @@ -0,0 +1,15 @@ +:: +:::: /hoon/link/hood/gen + :: +/? 314 +:: +:::: + !: +:- %say +|= $: [now=@da eny=@uvI byk=beak] + [arg=$?([dap=term ~] [who=ship dap=term ~]) ~] + == +:- %drum-unlink +?~ +.arg + [p.byk dap.arg] +[who.arg dap.arg] diff --git a/gen/moon.hoon b/gen/moon.hoon new file mode 100644 index 0000000000..179ef1cb2c --- /dev/null +++ b/gen/moon.hoon @@ -0,0 +1,16 @@ +:: +:::: /hoon/ticket/gen + :: +/? 
314 +:: +:::: + !: +:- %say +|= $: [now=@da eny=@uvI bec=beak] + [~ ~] + == +:- %noun +?> =(1 (met 5 p.bec)) +=+ mon=(mix (lsh 5 1 (end 5 1 eny)) p.bec) +=+ tic=((hard ,@p) .^(/a/(scot %p p.bec)/tick/(scot %da now)/(scot %p mon))) +"moon: {<`@p`mon>}; ticket: {}" diff --git a/lib/drum.hoon b/lib/drum.hoon index ab6c8528a7..29c64c87c6 100644 --- a/lib/drum.hoon +++ b/lib/drum.hoon @@ -178,6 +178,11 @@ =< se-abet =< se-view (se-link gyl) :: +++ poke-unlink :: + |= gyl=gill + =< se-abet =< se-view + (se-klin gyl) +:: :: ++ poke-exit :: :: |=(~ se-abet:(se-blit `dill-blit`[%qit ~])) :: XX find bone :: :: @@ -407,6 +412,10 @@ ?~ t.yal i.yal :(welp i.yal ", " $(yal t.yal)) :: +++ se-klin :: disconnect app + |= gyl=gill + +>(eel (~(del in eel) gyl)) +:: ++ se-link :: connect to app |= gyl=gill +>(eel (~(put in eel) gyl)) diff --git a/lib/urb.js b/lib/urb.js index e7708448dc..fcc0cbeb6e 100644 --- a/lib/urb.js +++ b/lib/urb.js @@ -275,6 +275,7 @@ window.urb.util = { }, basepath: function(spur, pathname){ spur = spur || '' + if(spur === '/') spur = '' pathname = pathname || window.location.pathname if(pathname[0] == '/') pathname = pathname.slice(1) pathname = pathname.split("/") diff --git a/pub/doc/arvo.mdy b/pub/doc/arvo.mdy deleted file mode 100644 index 3ca61fa60e..0000000000 --- a/pub/doc/arvo.mdy +++ /dev/null @@ -1,150 +0,0 @@ ---- -sort: 2 ---- - -arvo -==== - -Our operating system. - -arvo is composed of modules called vanes: - - - -
- -At a high level `%arvo` takes a mess of unix io events and turns them -into something clean and structured for the programmer. - -`%arvo` is designed to avoid the usual state of complex event networks: -event spaghetti. We keep track of every event's cause so that we have a -clear causal chain for every computation. At the bottom of every chain -is a unix io event, such as a network request, terminal input, file -sync, or timer event. We push every step in the path the request takes -onto the chain until we get to the terminal cause of the computation. -Then we use this causal stack to route results back to the caller. - -`++ducts` ---------- - -The `%arvo` causal stack is called a `++duct`. This is represented -simply as a list of paths, where each path represents a step in the -causal chain. The first element in the path is the first letter of -whichever vane handled that step in the computation, or the empty span -for unix. - -Here's a duct that was recently observed in the wild: - - ~[ - /g/a/~zod/4_shell_terminal/u/time - /g/a/~zod/shell_terminal/u/child/4/main - /g/a/~zod/terminal/u/txt - /d/term-mess - //term/1 - ] - -This is the duct the timer vane receives when "timer" sample app asks -the timer vane to set a timer. This is also the duct over which the -response is produced at the specified time. Unix sent a terminal -keystroke event (enter), and arvo routed it to %dill(our terminal), -which passed it on to the %gall app terminal, which sent it to shell, -its child, which created a new child (with process id 4), which on -startup asked the timer vane to set a timer. - -The timer vane saves this duct, so that when the specified time arrives -and unix sends a wakeup event to the timer vane, it can produce the -response on the same duct. This response is routed to the place we -popped off the top of the duct, i.e. the time app. This app produces the -text "ding", which falls down to the shell, which drops it through to -the terminal. Terminal drops this down to dill, which converts it into -an effect that unix will recognize as a request to print "ding" to the -screen. When dill produces this, the last path in the duct has an -initial element of the empty span, so this is routed to unix, which -applies the effects. - -This is a call stack, with a crucial feature: the stack is a first-class -citizen. You can respond over a duct zero, one, or many times. You can -save ducts for later use. There are definitely parallels to Scheme-style -continuations, but simpler and with more structure. - -Making Moves ------------- - -If ducts are a call stack, then how do we make calls and produce -results? Arvo processes "moves" which are a combination of message data -and metadata. There are two types of moves. A `%pass` move is analogous -to a call: - - [duct %pass return-path=path vane-name=@tD data=card] - -Arvo pushes the return path (preceded by the first letter of the vane -name) onto the duct and sends the given data, a card, to the vane we -specified. Any response will come along the same duct with the path -`return-path`. - -A `%give` move is analogous to a return: - - [duct %give data=card] - -Arvo pops the top path off the duct and sends the given card back to the -caller. - -Vanes ------ - -As shown above, we use arvo proper to route and control the flow of -moves. However, arvo proper is rarely directly responsible for -processing the event data that directly causes the desired outcome of a -move. This event data is contained within a card, which is simply a -`(pair term noun)`. 
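To make the `%pass` and `%give` mechanics concrete, here is a toy model in Python (illustration only, not Arvo code; the helper names and the little scenario are invented). It mirrors the terminal duct shown earlier: each `%pass` pushes a tagged return path onto the duct, and each `%give` pops one off so the response can find its way back down the chain.

    # A duct is a list of paths, newest first, with a unix i/o event at
    # the bottom of every causal chain.
    duct = ["//term/1"]                              # the terminal keystroke

    def do_pass(duct, requester, return_path):
        """%pass: push the requester's return path, tagged with the first
        letter of its vane, onto the duct."""
        return ["/" + requester[0] + return_path] + duct

    def do_give(duct):
        """%give: pop the top path; the card goes back to whoever pushed it."""
        return duct[0], duct[1:]

    duct = do_pass(duct, "dill", "/term-mess")       # %dill passes the keystroke on
    duct = do_pass(duct, "gall", "/a/~zod/terminal/u/txt")
    print(duct)  # ['/g/a/~zod/terminal/u/txt', '/d/term-mess', '//term/1']
    caller, duct = do_give(duct)                     # a response unwinds one layer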
Instead, arvo proper passes the card off to one of -its vanes, which each present an interface to clients for a particular -well-defined, stable, and general-purpose piece of functionality. - -As of this writing, we have seven vanes, which each provide the -following services: - -- `%ames` name of both our network and the vane that communicates over - it -- `%clay` version-controlled, referentially- transparent, and global - filesystem -- `%dill` terminal driver. Unix sends keyboard events to `%dill` from - either the console or telnet, and `%dill` produces terminal output. -- `%eyre` http server. Unix sends http messages to `%eyre`, and - `%eyre` produces http messages in response -- `%ford` handles resources and publishing -- `%gall` manages our userspace applications.. `%gall` keeps state and - manages subscribers -- `%time` a simple timer - -Cards ------ - -Cards are the vane-specific portion of a move. Each vane defines a -protocol for interacting with other vanes (via arvo) by defining four -types of cards: kisses, gifts, notes, and signs. - -When one vane is `%pass`ed a card in its `++kiss`, arvo activates the -`++call` gate with the card as its argument. To produce a result, the -vane `%give`s one of the cards defined in its `++gift`. If the vane -needs to request something of another vane, it `%pass`es it a `++note` -card. When that other vane returns a result, arvo activates the `++take` -gate of the initial vane with one of the cards defined in its `++sign`. - -In other words, there are only four ways of seeing a move: (1) as a -request seen by the caller, which is a ++note. (2) that same request as -seen by the callee, a `++kiss`. (3) the response to that first request -as seen by the callee, a `++gift`. (4) the response to the first request -as seen by the caller, a `++sign`. - -When a `++kiss` card is passed to a vane, arvo calls its `++call` gate, -passing it both the card and its duct. This gate must be defined in -every vane. It produces two things in the following order: a list of -moves and a possibly-modified copy of its context. The moves are used to -interact with other vanes, while the new context allows the vane to save -its state. The next time arvo activates the vane it will have this -context as its subject. - -This overview has detailed how to pass a card to a particular vane. To -see the cards each vane can be `%pass`ed as a `++kiss` or return as a -`++gift` (as well as the semantics tied to them), each vane's public -interface is explained in detail in its respective overview. diff --git a/pub/doc/arvo/ames.md b/pub/doc/arvo/ames.md deleted file mode 100644 index 68557ef27e..0000000000 --- a/pub/doc/arvo/ames.md +++ /dev/null @@ -1,29 +0,0 @@ -
- -`%ames` -======= - -Our networking protocol. - -`%ames` is the name of both our network and the vane that communicates -over it. When Unix receives a packet over the correct UDP port, it pipes -it straight into `%ames` for handling. Also, all packets sent over the -`%ames` network are sent by the `%ames` vane. Apps and vanes may use -`%ames` to directly send messages to other ships. In general, apps use -gall and clay to communicate with other ships rather than using `%ames` -directly, but this isn't a requirement. Of course, gall and clay use -`%ames` behind the scenes to communicate across the network. These are -the only two vanes that use `%ames`. - -`%ames` includes several significant components. Although the actual -crypto algorithms are defined in zuse, they're used extensively in -`%ames` for encrypting and decrypting packets. Congestion control and -routing is handled entirely in `%ames`. Finally, the actual `%ames` -protocol itself, including how to route incoming packets to the correct -vane or app, is defined in `%ames`. - -
- -
- - diff --git a/pub/doc/arvo/ames/commentary.md b/pub/doc/arvo/ames/commentary.md deleted file mode 100644 index 8e6397c09d..0000000000 --- a/pub/doc/arvo/ames/commentary.md +++ /dev/null @@ -1,1815 +0,0 @@ -`%ames` commentary -================== - -`%ames` is our networking protocol. - -First we give commentary on the code, the algorithms involved, and the -protocol. We trace through the code touched when a packet is sent, -received, acknowledged, and that acknowledgment applied. This is fairly -comprehensive, and contains many implementation details, but if you -understand this, then you understand `%ames`. - -If you've scrolled down this page, you may be intimidated by the amount -of Hoon code, especially if you are new to the language. Don't be afraid -of it, you don't have to read any of it if you don't want to -- every -interesting action the code takes is explained in plain English. In -fact, if you are new to the language, this may be a good learning -opportunity. Even if you don't understand every line of Hoon code, -you'll hopefully be able to follow most lines. By the time you've worked -through this, you'll have seen many common patterns and best practices. -Hoon, much more than other languages, is best learned by reading and -understanding large quantities of existing code. In this way, it is -similar to learning a natural language. All of this code is in -`arvo/ames.hoon`. - -After the commentary, we have reference documentation for all the data -structures that are specific to `%ames`. If you see a data structure or -a variable used that you don't recognize, search for it in the code, and -it's very likely defined in one of these data structures. We recommend -that another tab is kept open for easy access to the data structure -reference documentation. The code for these is split between -`arvo/ames.hoon` and `arvo/zuse.hoon`. - -The Lifecycle of a Packet (or, How a Packet Becomes Law) --------------------------------------------------------- - -Here, we will trace a packet as it makes its way through ames. There are -actually two pathways through ames: the legacy path through `%want`, and -the modern way, entered through `%wont`, with full end-to-end -acknowledgments. Here we will only trace the modern way, though much of -the path is the same for both. - -When an app (or a vane) wishes to send a packet to another ship, it must -send a `%wont` card: - - [%wont p=sock q=path r=*] :: e2e send message - -This card takes three arguments. The `p` is a `sock`, that is, a pair of -two ships, the first of which is the sender and the second is the -receiver. But wait, you ask, why do I get to decide who is the sender? -Can I fake like I'm someone else? The reason is that there are -potentially multiple ships on the same pier, and the kernel can send a -message from any of them. If you attempt to send a message from a ship -not on your pier, then ames will refuse to send it. If you hack around -in your own copy of ames to go ahead and send it anyway, then the other -ship will reject it because your key is bad. Only send messages from -yourself. - -The `q` is a path, representing the place on the other side that you -want to receive your message. It is approximately equivalent to a port -number. Messages on the same path are guaranteed to arrive in the same -order as they were sent. No such guarantees are made across paths. - -The `r` is the actual data that you are sending. 
As the type implies, -this can be an arbitrary noun, and it will be transferred to the -receiver exactly as-is, in a well-typed way. Of course, this is data -that is sent over the wire, so be careful not to send anything too -massive unless you're willing to wait. - -But enough about the interface. Grepping in ames.hoon for `%wont`, we -find that it appears in exactly two places: at its definition in -`++kiss`, and in `++knob`, where it is handled. We see that we go -directly into `++wise:am`. - - ++ wise :: wise:am - |= [soq=sock hen=duct cha=path val=* ete=?] :: send a statement - ^- [p=(list boon) q=fort] - zork:zank:(wool:(ho:(um p.soq) q.soq) hen cha val ete) - -The inputs to this gate are exactly the sort of thing you'd expect. In -particular, everything in the `%wont` gate is here plus the calling duct -so that we know where to send the acknowledgment and `ete` to determine -if we're going to do the modern end-to-end acknowledgments. - -The actual line of code looks intimidating, but it's really not all that -bad. Working from the inside out, the call to `++um` sets up our -domestic server, and the call to `++ho` sets up our knowledge about the -neighbor we're sending to. From the outside, `++zork` and `++zank` just -apply the changes made to our `++um` and `++am` cores, respectively. If -you're familiar with the common idiom of `++abet`, that's all this is. -The code predates the widespread usage of that name. - -The interesting part, then, is in `++wool:ho:um:am`. Let's look at the -code. - - ++ wool :: wool:ho:um:am - |= [hen=duct cha=path val=* ete=?] :: send a statement - ^+ +> - =+ ^= rol ^- rill - =+ rol=(~(get by ryl.bah) cha) - ?~(rol *rill u.rol) - =+ sex=sed.rol - :: ~& [%tx [our her] cha sex] - =. ryl.bah - %+ ~(put by ryl.bah) cha - rol(sed +(sed.rol), san (~(put by san.rol) sex hen)) - =+ cov=[p=p:sen:gus q=clon:diz] - %+ wind [cha sex] - ?: ete - [%bund q.cov cha sex val] - [%bond q.cov cha sex val] - -This is slightly more complicated, but it's still not all that bad. Our -inputs, at least, are fairly obvious. - -If you glance at the code for a second, you'll see that -`++wind:ho:um:am` seems to be able to send a message, or `++meal`, given -a `++soup`. This gate, then, just sets up the things we need to for -`++wind` to do its job. - -We first get `rol`, which is a `++rill`, that is, a particular outbound -stream. This stream is specific to the path on which we're sending. If -the path hasn't been used before, then we create it. We let `sex` be the -number of messages we've already sent on this path. - -Then, we update the outbound stream by incrementing the number of -messages sent and placing an entry in `san.rol` that associates the -message number with the `duct` that sent the message. This allows us to -give the acknowledgment to the one who sent the message. - -We let `cov` be the current life of our crypto and our neighbor's -crypto. At the moment, we only need our neighbor's life, which we put -into the meal. - -Finally, we call `++wind:ho:um:am` with the `++soup` of the path and -message number and the `++meal` of the payload itself. For end-to-end -acknowledged messages, we use `%bund`. - - [%bund p=life q=path r=@ud s=*] :: e2e message - -Looking at how we create the `%bund`, we can easily see what each field -is for. - -Following the trail a little further, we go to `++wind:ho:um:am`. 
- - ++ wind :: wind:ho:um:am - |= [gom=soup ham=meal] - :: ~& [%wind her gom] - ^+ +> - =^ wyv diz (zuul:diz now ham) - =^ feh puz (whap:puz now gom wyv) - (busk xong:diz feh) - -`++wind` does three things: it (1) encodes the message into a list of -possibly-encrypted packets, (2) puts the message into the packet pump, -and (3) sends any packets that are ready to be sent. Yes, our nice -little linear run of each gate calling exactly one other interesting -gate is over. We'll go in order here. - -`++zuul:lax:as:go` is the what converts a `++meal` into a list of -actual, 1KB packets. - - ++ zuul :: zuul:lax:as:go - |= [now=@da ham=meal] :: encode message - ^- [p=(list rock) q=_+>] - =< weft - ++ wasp :: null security - ++ weft :: fragment message - ++ wisp :: generate message - -For organizational purposes, `++zuul` constructs an internal core with -three arms. `++wasp` encodes the meal into an atom with no encryption. -`++wisp` encodes a meal with possible encryption (else it simply calls -`++wasp`). `++weft` takes the result of `++wisp` and splits it into -actual packets. - - ++ wasp :: null security - ^-([p=skin q=@] [%none (jam ham)]) - -This simply jams the meal, wrapping it with the `skin` of `%none`, -meaning no encryption. - -Since `++wisp` is a little long, we'll go through it line-by-line. - - ++ wisp :: generate message - ^- [[p=skin q=@] q=_..wisp] - -`++wisp` produces a pair of a `skin` and an atom, which is the meal -encoded as a single atom and possibly encrypted. - - ?: =(%carp -.ham) - [wasp ..wisp] - -If the meal that we're encoding is a `%carp`, then we don't encrypt it. -A `%carp` meal is a partial meal, used when a message is more than 1KB. -Since the entire message is already encrypted, we don't need to encrypt -each packet individually again. - - ?: !=(~ yed.caq.dur) - ?> ?=(^ yed.caq.dur) - :_ ..wisp - :- %fast - %^ cat 7 - p.u.yed.caq.dur - (en:r:cluy q.u.yed.caq.dur (jam ham)) - -If we have a symmetric key set up with this neighbor, then we simply use -it. The skin `%fast` is used to indicate a symmetric key. - - ?: &(=(~ lew.wod.dur) |(=(%back -.ham) =(%buck -.ham))) - [wasp ..wisp] - -If we do not yet have our neighbor's will, then there is no way that we -can seal the message so that only they may read it. If what we're -sending is an acknowledgment, then we go ahead and just send it in the -clear. - - =^ tuy +>.$ - ?:(=(~ lew.wod.dur) [*code +>.$] (griz now)) - -If we don't have our neighbor's will, then we "encrypt" with a key of 0. -If we do have their will, then we generate a new symmetric key that we -will propose. - - :_ ..wisp - =+ yig=sen - =+ bil=law.saf :: XX send whole will - =+ hom=(jam ham) - -`yig` will be the life and engine for our current crypto. `bil` is our -will. `hom` is the meal encoded as a single atom. - - ?: =(~ lew.wod.dur) - :- %open - %^ jam - [~ `life`p.yig] - bil - (sign:as:q.yig tuy hom) - -If we do not have our neighbor's will, then we send our current life -along with our will and the message. The message itself is "signed" with -a key of 0. - - :- %full - =+ cay=cluy - %^ jam - [`life`p.cay `life`p.yig] - bil - (seal:as:q.yig pub:ex:r.cay tuy hom) - -- :: --zuul:lax:as:go - -If we do have our neighbor's will, then we send our perception of their -current life, our current life, our will, and the message. The message -is sealed with their public key so that only they can read our message. - -Once we have the message encoded as an atom, `++weft` goes to work. 
- - ++ weft :: fragment message - ^- [p=(list rock) q=_+>.$] - =^ gim ..weft wisp - :_ +>.$ - ^- (list rock) - -We're going to produce a list of the packets to send. First, we use the -aforementioned `++wisp` to get the message as an atom. - - =+ wit=(met 13 q.gim) - ?< =(0 wit) - -`wit` is the number of 1KB (2\^13 bit) blocks in the message. We assert -that there is at least one block. - - ?: =(1 wit) - =+ yup=(spit [our her] p.gim q.gim) - [yup ~] - -If there is exactly one block, then we just call `++spit` to turn the -message into a packet. We'll explain what `++spit` does momentarily. - - =+ ruv=(rip 13 q.gim) - =+ gom=(shaf %thug q.gim) - =+ inx=0 - -If there is more than one block, then we rip it into blocks in `ruv`. -`gom` is a hash of the message, used as an id. `inx` is the number of -packets we've already made. - - |- ^- (list rock) - ?~ ruv ~ - =+ ^= vie - %+ spit - [our her] - wasp(ham [%carp (ksin p.gim) inx wit gom i.ruv]) - :- vie - $(ruv t.ruv, inx +(inx)) - -Here we package each block into a packet with `++spit` and produce the -list of packets. - - ++ spit :: cake to packet - |= kec=cake ^- @ - =+ wim=(met 3 p.p.kec) - =+ dum=(met 3 q.p.kec) - =+ yax=?:((lte wim 2) 0 ?:((lte wim 4) 1 ?:((lte wim 8) 2 3))) - =+ qax=?:((lte dum 2) 0 ?:((lte dum 4) 1 ?:((lte dum 8) 2 3))) - =+ wix=(bex +(yax)) - =+ vix=(bex +(qax)) - =+ bod=:(mix p.p.kec (lsh 3 wix q.p.kec) (lsh 3 (add wix vix) r.kec)) - =+ tay=(ksin q.kec) - %+ mix - %+ can 0 - :~ [3 1] - [20 (mug bod)] - [2 yax] - [2 qax] - [5 tay] - == - (lsh 5 1 bod) - -This is how we turn a message into a real packet. This has the -definition of the packet format. - -`wim` is the length of the sending ship, and `dum` is the length of the -receiving ship. There are only five possibilities for each of those, -corresponding to carriers, cruisers, destroyers, yachts, and submarines. -These are encoded in `yax` and `qax` as 0, 0, 1, 2, and 3, respectively. -Thus, `wix` and `vix` are the number of bytes that must be reserved for -the ship names in a packet. - -Next, we construct `bod` by simply concatenating the sending ship, the -receiving ship, and the body of the message. Then, we get the encryption -mechanism from `++skin`, which may be a 0, 1, 2, or 3, and put it in -`tay`. - -Next, we concatenate together, bit by bit, some final metadata. We use -three bits for our protocol number, which is incremented modulo eight -when there is a continuity breach or the protocol changes. We use the -final twenty bits of a hash of the body (which, we suppose, makes it a -twenty bit hash) for error-checking. We use two bits to tell how much -room is used in the body for the sending ship, and another two bits for -the receiving ship. Finally, we use five bits to store the encryption -type. Note that since there are only two bits worth of encryption types, -there are three unused bits here. This adds up to 32 bits of header -data. Finally, we concatenate this onto the front of the packet. Thus, -we can summarize the packet header format as follows. - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |Proto| Hash of Body |yax|qax| Crypto | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - -After this, there are `yax` bits of the sender name, `qax` bits of the -receiver name, and up to 8192 bits of data. Thus, the maximum size of a -packet is achieved in a message between two submarines with 8192 bits of -data. 
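To make that bit layout concrete, here is a small Python sketch of just the 32-bit header (illustration only; in the real code the twenty checksum bits come from `++mug` of the body, which is simply taken as an argument here).

    def pack_header(proto, chk20, yax, qax, crypto):
        """Assemble the 32-bit header described above, low bits first."""
        assert proto < 8 and chk20 < 2**20 and yax < 4 and qax < 4 and crypto < 32
        return (proto               # 3 bits: protocol number
                | (chk20  << 3)     # 20 bits: hash of the body
                | (yax    << 23)    # 2 bits: room used by the sender name
                | (qax    << 25)    # 2 bits: room used by the receiver name
                | (crypto << 27))   # 5 bits: encryption type

    def unpack_header(hed):
        return {'proto':   hed        & 0x7,
                'chk20':  (hed >> 3)  & 0xfffff,
                'yax':    (hed >> 23) & 0x3,
                'qax':    (hed >> 25) & 0x3,
                'crypto': (hed >> 27) & 0x1f}

    # the full packet is then header | (body << 32), just as in ++spit

That arithmetic also gives the worst case mentioned above, a packet between two submarines with a full payload.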
This will require 32+128+128+8192 = 8480 bits, or 1060 bytes. - -This concludes our discussion of `++zuul:lax:as:go`. If you recall from -`++wind:ho:um:am`, the list of packets from `++zuul` is passed into -`++whap:pu` to update the packet pump and get any packets that can be -sent immediately. - - ++ whap :: whap:pu - |= [now=@da gom=soup wyv=(list rock)] :: send a message - ^- [(list rock) _+>] - =. pyz (~(put by pyz) gom (lent wyv)) - =. +> - |- ^+ +>.^$ - ?~ wyv +>.^$ - %= $ - wyv t.wyv - nus +(nus) - diq (~(put by diq) (shaf %flap i.wyv) nus) - puq (~(put to puq) [nus `soul`[gom 0 | ~2000.1.1 i.wyv]]) - == - (harv now) - -First, we put into `pyz` the id for this message and the number of its -packets that have not yet been acknowledged, which is of course the -total number of packets since we haven't even sent the packets. - -For every packet, we change three things in the state (`++shed`) of our -packet pump: (1) we increment `nus`, the number of packets sent; (2) we -put the packet number into `diq` keyed by a hash of the packet; and (3) -we put the packet into the packet queue, with the basic metadata of its -id `gom`, 0 transmissions, not live yet, last sent in the year 2000, and -the packet itself. - -Finally, we harvest the packet pump. - - ++ harv :: harv:pu - |= now=@da :: harvest queue - ^- [(list rock) _+>] - ?: =(~ puq) [~ +>(rtn ~)] - ?. (gth caw nif) [~ +>] - =+ wid=(sub caw nif) - =| rub=(list rock) - =< abet =< apse - |% - -`++harv` contains a core for most of its work. The meat is in `++apse`. -First, though, it sets itself up. If there aren't any packets in the -queue, then we simply do nothing except set `rtn`, our next timeout, to -nil because we don't have any packets that may need to be retransmitted. -If we have more live (that is, sent and unacknowledged) packets than our -window size, then we don't do anything. - -Otherwise, we let `wid` be the width of our remaining packet window, and -we initialize `rub` to nil. `rub` will be the list of packets that are -ready to be sent. We then call `++apse` and pass the result to `++abet`. -`++apse` decides which packets are ready to be sent. - - ++ apse - ^+ . - ?~ puq . - ?: =(0 wid) . - => rigt =< left - ?> ?=(^ puq) - ?: =(0 wid) . - ?. =(| liv.q.n.puq) . - :: ~& [%harv nux.q.n.puq p.n.puq] - %_ . - wid (dec wid) - rub [pac.q.n.puq rub] - nif +(nif) - liv.q.n.puq & - nux.q.n.puq +(nux.q.n.puq) - lys.q.n.puq now - == - -If there are no remaining packets to send, or if we've filled the packet -window, do nothing. We call `++rigt` and `++left` to process the left -and right branches of the packet queue. - -Now we assert that the queue is not empty, and we again check that we -haven't filled the packet window. We will operate on the head of the -queue. If the packet is live, then do nothing. Otherwise, we go ahead -and send it. - -To send, we (1) decrement `wid`, our packet window width; (2) cons the -packet onto the `rub`, which will be returned as the list of packets to -send; (3) increment `nif`, the number of live packets; (4) set the -packet to be live; (5) increment the number of transmissions of the -packet; and (6) set the last sent time of the packet to now. - - ++ left - ?> ?=(^ puq) - ^+(. =+(lef=apse(puq l.puq) lef(puq [n.puq puq.lef r.puq]))) - ++ rigt - ?> ?=(^ puq) - ^+(. =+(rig=apse(puq r.puq) rig(puq [n.puq l.puq puq.rig]))) - -These do exactly what you would expect: they traverse the packet queue -so that `++apse` gets called recursively through it. 
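Stripped of the treap bookkeeping, what `++harv` and `++apse` do together is an ordinary window-limited send. A rough Python rendering (illustration only; `caw`, `nif`, `rto`, and `rtn` keep their names from the Hoon above, everything else is invented):

    from dataclasses import dataclass, field

    @dataclass
    class Packet:
        data: bytes
        live: bool = False           # sent and not yet acknowledged
        transmissions: int = 0       # nux in the Hoon
        last_sent: float = 0.0       # lys in the Hoon

    @dataclass
    class Pump:
        queue: list = field(default_factory=list)   # packets in sequence order
        caw: int = 2                 # logical packet window
        nif: int = 0                 # live (unacknowledged) packets
        rto: float = 1.0             # retransmit timeout
        rtn: float | None = None     # next timer, if any

    def harvest(pump, now):
        """Send every queued, not-yet-live packet that fits in the window."""
        out = []
        for pkt in pump.queue:
            if pump.nif >= pump.caw:                 # window is full
                break
            if pkt.live:                             # already in flight
                continue
            pkt.live = True
            pkt.transmissions += 1
            pkt.last_sent = now
            pump.nif += 1
            out.append(pkt.data)
        if out:
            pump.rtn = now + pump.rto                # when to consider resending
        return out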
- -Finally, `++abet` gets called, which resolves the changes. - - ++ abet - ?~ rub [~ +>.$] - [(flop rub) +>.$(rtn [~ (add rto now)])] - -This returns the packets that we wish to send, and it updates the -timeout so that we know when to try resending unacknowledged packets. - -This concludes our discussion of `++whap:pu`. To finish -`++wind:ho:um:am`, we just need to delve into `++busk:ho:um:am`. But -wait, in the call to `++busk`, the first argument is `xong:diz`. What is -this? This, my dear reader, is one more detour, this time into -`++xong:lax:as:go`. - - ++ xong :: xong:lax:as:go - ^- (list ship) :: route unto - =+ [fro=xen too=xeno] - =+ ^= oot ^- (list ship) - =| oot=(list ship) - |- ^+ oot - ?~ too ~ - ?: (lien fro |=(a=ship =(a i.too))) ~ - [i.too $(too t.too)] - :: ~& [%xong-to [our her] (weld oot ?>(?=(^ fro) t.fro))] - (weld oot ?>(?=(^ fro) t.fro)) - -This gets the list of intermediate ships needed to get a packet from us -to our neighbor. First, we get `fro` and `too`, the "canons" of ourself -and our neighbor, respectively. - -What is this "canon", you ask? A canon is simply a ship plus its -"ancestors", as defined by `++sein`. For example, the canon of -`~hoclur-bicrel` is: - - ~hoclur-bicrel/try=> (saxo ~hoclur-bicrel) - ~[~hoclur-bicrel ~tasruc ~tug] - -If we follow the algorithm in `++xong`, we see that we are simply -creating a list of ships that form a path from our neighbor to ourself. -Essentially, we look through the canon of our neighbor until we find -something in our own cannon -- a common ancestor. Or, if we are from -different carriers, then there is no common ancestor. We then weld this -onto the tail of our own canon. In the end, this is simply a list of -possible ships to try to route via to get to our neighbor, ordered by -preferability (that is, closeness to our neighbor). We will end up -trying, in order, to find a lane to these. - -Now, we can finally get to `++busk:ho:um:am`. - - ++ busk :: busk:ho:um:am - |= [waz=(list ship) pax=(list rock)] :: send packets - %_ +> - bin - |- ^+ bin - ?~ pax bin - $(pax t.pax, bin (weld (flop (wist:diz now waz ~ i.pax)) bin)) - == - -Thankfully, `++busk` is fairly simple. We go through the list of packets -and convert them to `++boon`s with `++wist:lax:as:go`. These boons are -placed into `bin`, and they end up getting processed by `++clop` (this -happens in `++knob`). - - ++ wist :: wist:lax:as:go - |= $: now=@da :: route via - waz=(list ,@p) - ryn=(unit lane) - pac=rock - == - ^- (list boon) - ?: =(our her) [[%ouzo *lane pac] ~] - ?~ waz ~ - =+ dyr=?:(=(her i.waz) dur (gur i.waz)) - ?. ?& !=(our i.waz) - ?=(^ lun.wod.dyr) - == - $(waz t.waz) - :_ ?: ?=(%ix -.u.lun.wod.dyr) - $(waz t.waz) - ~ - :+ %ouzo u.lun.wod.dyr - ?: &(=(i.waz her) =(~ ryn)) pac - =+ mal=(jam `meal`[%fore her ryn pac]) - %- spit - ^- cake - :* [our i.waz] - ?~ yed.caq.dyr [%none mal] - :- %fast - %^ cat 7 - p.u.yed.caq.dyr - (en:crua q.u.yed.caq.dyr mal) - == - -This takes a sample of the current time, the list of ships that we just -generated, a lane if we already know it, and the packet itself. - -First, if we are sending a message to ourself, then we simply create a -`%ouzo` boon with a bunted lane. Otherwise, if there are no routing -candidates, there is nothing we can do, so we return nil. - -Next, we get the `dore` of the first routing candidate. If we're looking -at the neighbor to whom we're trying to send the message, then we simply -use the `dore` that we already have. Otherwise, we get a default `dore`. 
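Since the rest of `++wist` is just a walk down the candidate list that `++xong` produced, it may help to see that list spelled out. A small Python sketch (illustration only; the canon used for our own ship here is a made-up placeholder):

    def route_candidates(our_canon, her_canon):
        """Everything in her canon up to the first common ancestor,
        then the tail of our own canon, nearest-to-her first."""
        ours = set(our_canon)
        head = []
        for ship in her_canon:
            if ship in ours:             # found a common ancestor; stop
                break
            head.append(ship)
        return head + our_canon[1:]      # fall back on our own ancestors

    # her canon, from the example above: ~hoclur-bicrel, ~tasruc, ~tug
    print(route_candidates(['~our-planet', '~our-star', '~tug'],
                           ['~hoclur-bicrel', '~tasruc', '~tug']))
    # ['~hoclur-bicrel', '~tasruc', '~our-star', '~tug']

`++wist` simply tries these in order.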
- -If we're the first routing candidate, or if we have don't have a lane to -this candidate, then we skip this candidate and move on to the next one. - -If we have only a provisional ip address, then we try to send on it, but -we also try to send on later routing candidates as well. Otherwise, we -only send on this one candidate. - -Finally, we create the actual `%ouzo` boon. The lane is the one from our -`dore`. If we're sending it directly to our intended recipient, and we -haven't been told to use a specific lane, then we just send the packet -directly. Otherwise, we wrap it in a little `%fore` meal, telling the -intermediary to whom we wish it to be sent. If we have already set up a -symmetric key with the intermediary, then we encrypt it with that. -Otherwise, we send it in the clear. - -Now, if you recall, we have traced all the way through from the -beginning when, in `++knob`, the `%wont` card was handled by a call to -`++wise`. There is only one more step before the packet is finally sent. -Looking in `++knob`, we see that the resultant list of boons is passed -into `++clop`, which will execute the correct actions and return a list -of moves. In `++clop`, we see the handling of each specific boon. The -one we are interested in is `%ouzo`, since that is the only one we have -sent thus far. - - %ouzo - :: ~& [%send now p.bon `@p`(mug (shaf %flap q.bon))] - :_ fox - [[gad.fox [%give %send p.bon q.bon]] ~] - -Very simply, we give a `%send` gift along the special duct that goes -straight into the bowels of unix. This is the last stop before we drop -into vere, and later libuv. And then... the world. - -The packet, after its creation, embarks on a journey across physical -time and space into the great unknown. Hurtling through fiber-optic -cables at hundreds of thousands of kilometers per second, it finally -arrives at our neighbor's network adapter. The adapter tells unix, unix -tells libuv, libuv tells vere, and vere sends a `%hear` kiss to ames. -And now we reenter the kernel. - -The `%hear` kiss goes straight to `++knob`, just as did the `%wont` kiss -earlier. - - %hear - (~(gnaw am [now fox]) %good p.kyz q.kyz) - -Here, though, we call `++gnaw:am` to process the packet. The arguments -to `++gnaw` are the same as those to the `%hear` kiss: the lane on which -the packet was received and the packet itself. The other argument is -just `%good`, which is a `++cape` saying that we expect the packet to -succeed. If a formal error occurs, then since we have a transactional -event system, the `%hear` event will never be considered to have -actually happened, and unix will send a `%hole` kiss so that we may send -a negative acknowledgment. - - ++ gnaw :: gnaw:am - |= [kay=cape ryn=lane pac=rock] :: process packet - ^- [p=(list boon) q=fort] - ?. =(2 (end 0 3 pac)) [~ fox] - =+ kec=(bite pac) - ?: (goop p.p.kec) [~ fox] - ?. (~(has by urb.ton.fox) q.p.kec) - [~ fox] - =< zork - =< zank - %- ~(chew la:(ho:(um q.p.kec) p.p.kec) kay ryn %none (shaf %flap pac)) - [q.kec r.kec] - -First, we check the protocol number. If it is not correct, then we -simply ignore the packet entirely. Otherwise, we parse the packet with -`++bite`, which converts a packet atom into a `cake`, that is, a triple -of the `sock` (pair of sender and receiver), the `skin` (encryption -type), and the data. 
- - ++ bite :: packet to cake - |= pac=rock ^- cake - =+ [mag=(end 5 1 pac) bod=(rsh 5 1 pac)] - =+ :* vez=(end 0 3 mag) :: protocol version - chk=(cut 0 [3 20] mag) :: checksum - wix=(bex +((cut 0 [23 2] mag))) :: width of receiver - vix=(bex +((cut 0 [25 2] mag))) :: width of sender - tay=(cut 0 [27 5] mag) :: message type - == - ?> =(2 vez) - ?> =(chk (end 0 20 (mug bod))) - :+ [(end 3 wix bod) (cut 3 [wix vix] bod)] - (kins tay) - (rsh 3 (add wix vix) bod) - -This is exactly the inverse of `++spit`. Note that here we check both -the protocol number and the hash, crashing on error. Remember that a -crash will result in a negative acknowledgment being sent. - -Continuing in `++gnaw`, we see that if the intended recipient is not on -our pier, then we drop the packet. - -If we've gotten this far, then we wish to process the packet. Recall -that `++ho` and `++um` set up the domestic server and foreign client -cores, respectively, and that `++zork` and `++zank` resolve any changes -to these cores. - -The new stuff here, then, is the `++la` core and the `++chew` arm. The -`++la` sets up a core for this particular packet, containing the current -success/failure `cape`, the lane it was sent on, the encryption type, -and a hash of the packet, used as an id. - -`++chew` is called with the encryption type and the message itself. It -contains a little helper core inside of it, which starts immediately -with `++apse`. - - ++ apse - ^+ +>.$ - =+ oub=bust:puz - =+ neg==(~ yed.caq.dur.diz) - =. +>.$ east - =+ eng==(~ yed.caq.dur.diz) - =+ bou=bust:puz - =. bin - ?. &(oub !bou) bin - :_(bin [%wine [our her] " is ok"]) - =. bin - ?. &(neg !eng) bin - :_(bin [%wine [our her] " is your neighbor"]) - +>.$ - -First, we let `oub` be true if our neighbor hasn't been responding to us -for more than sixteen seconds. Let `neg` be true if we haven't yet -proposed a symmetric key, meaning that we haven't yet corresponded with -this ship, so they are not our neighbor. Next, we run `++east`, which -we'll go into in just a minute. - -We now do the same two checks and store the results in `eng` and `bou`. -If our neighbor has, like the prodigal son, returned after an extended -absense, then we send a `%wine` boon as the proverbial fatted calf, -which is simply printed out to the console. Likewise, if we are meeting -one with whom we have never had the pleasure of acquainting ourselves, -we send a message to the console to that effect. - -We skipped over `++east`, which contains the meat of the processing. It -first decrypts the message, then calls `++chow:la:ho:um:am` with the -resultant meal. We'll go through each of the four cases in turn, but -first since each one calls `++bilk:pu`, we'll take a brief detour. - - ++ bilk :: bilk:pu - |= now=@da :: inbound packet - ^+ +> - =+ trt=(mul 2 rtt) - %= +>.$ - rue [~ now] - rto trt - rtn ?~(puq ~ [~ (add now trt)]) - == - -This updates the timing information in our packet pump. `rue`, the last -time we have heard from this neighbor, is set to now. `rto`, the -retransmit timeout is set to twice the current ping time, and if there -is anything in the packet queue, then we reset the next timeout, since -we've just heard a message. - -Back to `++east`. - - %none - =. puz (bilk:puz now) - (chow ((hard meal) (cue msg))) - -The simplest case is when the encryption type is `%none`. We first call -`++bilk` to update the packet pump, then we cue (unjam) the message into -a meal. 
We hard cast it into a meal -- if the cast fails, then we do -want to crash since someone is sending us malformed data. Finally, we -send the result to `++chow` for interpretation and handling. - - %fast - =+ [mag=`hand`(end 7 1 msg) bod=(rsh 7 1 msg)] - =+ dey=(kuch:diz mag) - ?~ dey - ~& [%bad-key her mag] - +>.$ :: ignore unknown key - =. puz (bilk:puz now) - =^ key diz u.dey - (chow(aut sin) ((hard meal) (cue (dy:q:sen:gus key bod)))) - -For symmetric encryption, we first get the `hand`, which is the hash of -the symmetric key. We pass it to `++kuch:lax:as:go`, which returns the -key if we either have used it before or we have proposed it. If we have -proposed it, then we change its status from proposed to real. If -`++kuch` fails, then we drop the packet and print out a `%bad-key` -message. - -Otherwise, we call `++bilk` as before to update the packet pump and pass -into `++chow` the decrypted data. - - %full - =+ mex=((hard ,[p=[p=life q=life] q=will r=@]) (cue msg)) - =. diz (deng:diz q.mex) - =+ wug=cluy:diz - ?> =(q.p.mex p.wug) - =+ gey=(sev:gus p.p.mex) - =+ mes=(need (tear:as:q.gey pub:ex:r.wug r.mex)) - =. diz (wasc:diz p.mes) - =. puz (bilk:puz now) - (west(msg q.mes)) - -For sealed asymmetric encryption, we first take off the the layer of -data that gives us the life and will of our neighbor, and we apply try -to extend their former will with the new data. `++deng` will fail if -this is impossible. - -Next, we get our most current understanding of our neighbor's crypto, -and we verify that it's the same life as what they're sending. Then, we -get our own crypto from `++sev` and decrypt the message with the public -key from our neighbor's crypto. We register the proposed symmetric key, -update the packet pump, and call `++west`, which simply casts the -message to a meal and calls `++chow`, reporting any error. - - %open - =+ mex=((hard ,[p=[~ q=life] q=will r=@]) (cue msg)) - =. diz (deng:diz q.mex) - =+ wug=cluy:diz - ?> =(q.p.mex p.wug) - =+ mes=(need (sure:as:r.wug *code r.mex)) - =. puz (bilk:puz now) - (west(msg mes)) - -Finally, for signed asymmetric encryption, we, as before, take off the -layer of data that gives us the life and will of our neighbor. This -time, of course, we do not get our own crypto -- only that of our -neighbor. - -The rest you have seen. We call `++deng` to extend the will, we verify -that their crypto life is what we think it ought to be, we "decrypt" the -data, we update the packet pump, and we call `++west` to call `++chow`. - - ++ chow :: chow:la:ho:um:am - |= fud=meal :: interpret meal - ^+ +> - =. diz ?:(=(%none aut) diz (wast:diz ryn)) - (dine fud) - -Here, if the message was encrypted at all, then we call -`++wast:lax:as:go`, which simply updates the lane (route) to our -neighbor (unless we're given a provisional route). This ensures that we -always have the most direct possible path to them. - -We've been handling this meal for so long, we've almost forgotten what -we want to do with it. The telos is of any meal to be dined on. We will -choose out the cases here that are important to our current -investigation. - - %fore - =+ ^= lyn ^- lane - ?~ q.fud ryn - ?. 
?=(%if -.u.q.fud) u.q.fud - [%ix +.u.q.fud] - :: u.q.fud - ?: =(our p.fud) - (emit %mead lyn r.fud) - =+ zid=(myx:gus p.fud) - (emir (wist:zid now xong:zid [~ lyn] r.fud)) - -Forwarding is the simplest case, since we've seen all the arms before, -except perhaps `++emit` and `++emir`, which simply take a boon or list -of boons respectively and queue them up to be handled when the core -resolves. If we're told to forward a packet to ourselves, then we emit a -`%mead` boon which simply sends another `%hear` kiss to ourselves with -the data. Otherwise, we try to find a route to the recipient, as before. - - %carp - =+ zol=(~(get by olz.weg) s.fud) - ?^ zol cock(kay u.zol) - =^ neb nys.weg - =+ neb=(~(get by nys.weg) s.fud) - ?^ neb [u.neb nys.weg] - =+ neb=`bait`[(kins p.fud) 0 r.fud ~] - [neb (~(put by nys.weg) s.fud neb)] - ?> (lth q.fud p.r.neb) - ?> =((kins p.fud) p.neb) - ?> =(r.fud p.r.neb) - =+ doy=`(unit ,@)`(~(get by q.r.neb) q.fud) - ?^ doy cock - => ^+ . %= . - q.r.neb (~(put by q.r.neb) q.fud t.fud) - q.neb +(q.neb) - == - :: ~& [%carp q.fud s.fud q.neb p.r.neb] - ?: =(q.neb p.r.neb) - =: nys.weg (~(del by nys.weg) s.fud) - olz.weg (~(put by olz.weg) s.fud kay) - == - (golf p.neb r.neb) - =. +>.$ cock - +>.$(nys.weg (~(put by nys.weg) s.fud neb)) - -Here, we have received a partial message, and we're just assembling the -individual packets into a message. Most of this code is fairly -algorithmic, so we'll just hit the high points. In the beginning, we -check if we've already received this message, and if so, we resend the -acknowledgment. Remember, "always ack a dupe, never ack an ack". - -In `nys.weg` we keep track of an incoming set of partial packets, -indexed by the `flap` hash that comes with every packet. We check to see -if we have already received this partial message, and if so we -acknowledge it. Otherwise, we put it in `nys.weg` unless this is the -last message, in which case we ack the last partial message, move the -complete message into `olz.weg`, and call `++golf`, which assembles the -message and calls `++chew`, to start the dance again with the complete -message. - - %bund - :: ~& [%bund q.fud r.fud] - ?> =(p:sen:gus p.fud) - (deer q.fud r.fud ?-(kay %dead ~, %good [~ s.fud])) - -What if we're just receiving a regular old, garden variety message? We -call `++deer` with the data from the message. If we already know that -the message processing will fail (that is, if we got a `%hole` card from -unix rather than a `%hear` card), then we don't even send the data at -all. Remember, if a packet fails to process, it's as if it never even -arrived, except that we send a negative acknowledgment. - - ++ deer :: deer:la:ho:um:am - |= [cha=path num=@ud dut=(unit)] :: interpret message - ^+ +> - =+ rum=(fall (~(get by raz.bah) cha) *race) - %= +>.$ - +> - ?. (gte num did.rum) :: always ack a dup - (cook (~(get by bum.rum) num) cha ~ ryn dam) - ?: dod.rum - (coat cha rum(mis (~(put by mis.rum) num [kay ryn dam dut]))) - %= +>.+>.$ - raz.bah - %+ ~(put by raz.bah) cha - rum(mis (~(put by mis.rum) num [kay ryn dam dut])) - == - == - -First, we get the race for this particular triple of sender, receiver, -and path, creating it if it doesn't exist. If we've already acked the -message, then we resend the ack. Note that `did.rum` is the number of -packets we acknowledged, positively or negatively while `bum.rum` is a -map of message numbers to negative acknowledgments. 
Thus, if a message -number is less than `did.rum`, then if it's in `bum.rum` then it was -negatively acknowledged, otherwise it's postively acknowledged. Thus, we -are constant in space with the number of successful messages and linear -in the number of failed messages. We'll document `++cook` later on, but -suffice it to say that it sends an acknowledgment. It is to end-to-end -acknowledgments what `++cock` is to packet-level acknowledgments. - -If we are still processing a message (that is, `dod.rum` is false), then -we simply put this message in the map of misordered packets to be -processed when their time comes. "Processing a message" in this case -means that we've received the message and notified the correct -application, but we're still waiting for the application-level -acknowledgment. - -Otherwise, we're ready for a packet, so we process it. - - ++ coat :: coat:ho:um:am - |= [cha=path rum=race] :: update input race - ^+ +> - =+ cun=(~(get by mis.rum) did.rum) - ?~ cun - +>.$(raz.bah (~(put by raz.bah) cha rum)) - ?. =(%good p.u.cun) +>.$ - ?> ?=(^ s.u.cun) - %= +>.$ - raz.bah (~(put by raz.bah) cha rum(dod |)) - bin - :_ bin - :^ %mulk - [our her] - `soap`[[p:sen:gus clon:diz] cha did.rum] - u.s.u.cun - == - -First, we grab the message we want to process and store it in `cun`. If -it's a good packet, then we change `dod.rum` to false, meaning that -we're in the middle of processing a packet and should not start -processing another one. We also put a `%mulk` boon into the queue so -that, when it all resolves, we send a mesage to the intended recipient -application. The boon contains the sender, the receiver, the identity of -the message, and the message itself. - -This bubbles up all the way back to `++knob`, where we were handling the -`%hear` card. Following the logic in `++knob`, we can see that the boons -get sent into `++clop` to be turned into actual arvo-level moves. We've -been here before, if you recall, when we handled the `%cake` boon to -send a message. Now, we're handling the `%mulk` boon, which is -unfortunately slightly more complicated. - - %mulk - :: ~& [%mulk p.bon q.bon] - ?> ?=([@ @ *] q.q.bon) - ?> ?=(%q i.q.q.bon) - ?+ i.t.q.q.bon - ~& %mulk-bad - :_ fox - :~ :- (claw p.p.bon) - [%sick %wart p.bon i.t.q.q.bon t.t.q.q.bon r.bon] - == - %ge :: %gall request - ?> ?=([@ ~] t.t.q.q.bon) - =+ app=`term`(need ((sand %tas) i.t.t.q.q.bon)) - =+ ^= pax - :+ (scot %p p.p.bon) - (scot %p q.p.bon) - q.q.bon - :_ fox [hen %pass pax %g %rote p.bon app r.bon]~ - %gh :: %gall response - ?> ?=([@ ~] t.t.q.q.bon) - =+ app=`term`(need ((sand %tas) i.t.t.q.q.bon)) - =+ ^= pax - :+ (scot %p p.p.bon) - (scot %p q.p.bon) - q.q.bon - :_ fox [hen %pass pax %g %roth p.bon app r.bon]~ - == - -We're dispatching messages based on the prefix of their path. Since only -`%gall` apps use end-to-end acknowledgments at the moment, every path -must have at least two elements, and the first one must be `%q`. Beyond -that, we handle the `/q/ge` and `/q/gh` cases for gall requests and -responses, respectively. - -In both cases, we require the next term in the path to be the name of -the intended recipient `%gall` app. Thus, a message to `/q/ge/talk` for -example, will send a message to the talk app. - -We then send a message to the app itself. The message is either a -`%rote` or a `%roth` for a request and a response, respectively. The -content is the `rook` or `roon` that was sent (stored in `r.bon`), but -we don't actually handle that at all here. That's completely a -`%gall`-level thing. 
We're just the messenger. - -Notice the path we send this over. We encode the sender, the receiver, -and the path over which it was sent. This fully specifies the `race` so -that when the app gives us the acknowledgment we know where to send it. - -We now have another interlude. We have entrusted our precious data, so -carefully guarded and guided from the app on that far-away ship, to our -local app. It has the ability to do whatever it pleases with it. It may -take a significant amount of time to process. When the message has been -handled by this app, though, it must produce an acknowledgment. Our -final task is to deliver this acknowledgment to the sending app. - -We should describe here what exactly these oft-mentioned acknowledgments -actually consist of. There are two kinds of acknowledgments: positive -and negative. A positive acknowledgment contains no data other than its -existence. A negative acknowledgment may optionally include a reason for -said negativity. Formally, a negative acknowledgment is an `ares`, which -is a unit pair of a term and a list of tanks. If this is null, this is -simply a failure with no associated information. If the pair exists, the -term is a short error code that is usually both human and computer -readable. For example, if you try to send a message to a valid `%gall` -app that doesn't have any `++poke` to handle it, then `%gall` will give -a negative acknowledgment with error term `%poke-find-fail`. The list of -tanks is a human-readable description of the error. This often contains -a stack trace. At any rate, all this information is returned to the -sending app on the other end of the wire. - -After this brief interlude, our story resumes in `++knap`, where we -receive responses. In particular, a `%mean` indicates a negative -acknowledgment while a `%nice` indicates a positive acknowledgment. - - ?(%mean %nice) - ?> ?=([@ @ @ *] tea) - =+ soq=[(slav %p i.tea) (slav %p i.t.tea)] - =+ pax=t.t.tea - =+ ^= fuy - =< zork =< zank - %^ ~(rack am [now fox]) soq pax - ?-(+<.sih %mean `p.+.sih, %nice ~) - => %_(. fox q.fuy) - =| out=(list move) - |- ^- [p=(list move) q=_+>.^$] - ?~ p.fuy - [(flop out) +>.^$] - =^ toe fox (clop now hen i.p.fuy) - $(p.fuy t.p.fuy, out (weld (flop toe) out)) - -Recall the format of the path we sent the message on, and you'll -understand why `soq` and `pax` are the sender/receiver pair and path on -which the message was sent. The rest of this is structured much like -`++knob`, so we call `++rack:am` and send the resulting boons to -`++clop`. Business as usual. - - ++ rack :: rack:am - |= [soq=sock cha=path cop=coop] :: e2e ack - =+ oh=(ho:(um p.soq) q.soq) - =. oh (cook:oh cop cha ~) - (cans:oh cha) - -First, we set up `++um` and `++ho`, as we've done twice before, for our -domestic and foreign servers, respectively. The other two things are -new, though. Well, `++cook` is not actually new, but we delayed the -explanation saying only that it sends an acknowledgment. The time has -come. - - ++ cook :: cook:ho:um:am - |= [cop=coop cha=path ram=(unit ,[ryn=lane dam=flap])] - ^+ +> :: acknowledgment - =+ rum=(need (~(get by raz.bah) cha)) - =+ lat=(~(get by mis.rum) did.rum) - ?: &(?=(~ lat) ?=(~ ram)) ~&(%ack-late-or-redundant +>.$) - =+ ^- [ryn=lane dam=flap] - ?^ ram [ryn.u.ram dam.u.ram] - ?< ?=(~ lat) - [q r]:u.lat - =. 
raz.bah - ?^ ram raz.bah - %+ ~(put by raz.bah) cha - rum(dod &, bum ?~(cop bum.rum (~(put by bum.rum) did.rum u.cop))) - =^ roc diz (zuul:diz now [%buck cop dam ~s0]) - (busk(diz (wast:diz ryn)) xong:diz roc) - -If we are acknowledging a message that we have already acked, the `ram` -will contain the new lane and flap to send the duplicate ack to. This -happens if we call `++cook` in `++deer`, but it doesn't happen from -`++rack`. If there is no message waiting to be acknowledged and we're -not given an explicit lane and flap (that is, we're not sending a -duplicate ack), then the app must have sent us multiple acknowledgments. -We do the only sensible thing we can do and drop all acknowledgments -after the first, printing a message. This is, in fact, an error, so it -could be argued that we ought to crash. Whatever you do, don't depend on -this not crashing. - -First, we grab the race specified by the given path, and we get the most -recent in-order message, which must be the one which is being -acknowledged. - -Then, we decide which lane/flap to respond on/to. Basically, in the -usual case we respond on the lane through which the initial message was -sent, which is stored along with the other packet information in -`mis.rum`, since it has to be remembered across calls to ames. However, -if we receive a duplicate message, then we must respond to the new -message. It's quite possible the reason the other acknowledgment didn't -get returned was that the lane between the ships was broken. - -At any rate, we update the race by saying that we've finished processing -this packet (unless we're sending a duplicate ack) and, if we're sending -a negative acknowledgment, putting the negative ack into `bum.rum` so -that we can resend it if necessary. - -We encode our new message, updating the packet pump, with `++zuul`, as -before, and we send it off with `++busk`, routed via `++wast` to one of -the ships in `++xong`. Of course, in practice, we don't even look at the -ships in `++xong` because we already have a lane directly to our -neighbor (the one over which they sent their message to us). - -We glossed over the actual message we're sending back. We're sending a -`%buck` meal, which is an acknowledgment. The `cop` specifies whether -this is a positive or a negative ack, `dam` specifies the message we're -acknowledging, and the `~s0` is a placeholder for the processing time -required. This time is neither calculated (though it is hopefully -obvious how to do so) nor used at present, but this information may be -used in the future for improved congestion control. Since the round-trip -time for an end-to-end acknowledged packet includes the processing time -on the other end, most common congestion control algorithms will stumble -when some messages take much longer to process than others. As noted, -though, this is simply an opportunity for improvement -- our congestion -control algorithms are relatively naive at the moment. - -Recall that `++busk` calls `++wist` to put the actual `%ouzo` boon in -the queue, which gets handled by `++clop` to actually send the message. -This is the same pipeline as sending any other message, so we'll refer -you to the explanation above if you've forgotten it. - -The last thing we need to do on this ship is move on to the next packet -in the queue if there is one. If you recall, in `++rack` after the call -to `++cook` there was a call to `++cans:ho:um:am`. - - ++ cans :: cans:ho:um:am - |= cha=path - =+ rum=(need (~(get by raz.bah) cha)) - =. 
rum - %= rum - did +(did.rum) - mis (~(del by mis.rum) did.rum) - == - (coat cha rum) - -This is very simple. We increment the number of packets that we've -acknowledged on this race and we delete the packet that we just -acknowledged from the set of misordered packets. - -Then, we call `++coat` again to process the next packet if we've already -received it. And that's it for this. - -The acknowledgment now travels the same path that its forebearer, the -original message, once tread, but this time not into the great unknown. -The weary traveler is seeking out its familial roots, finding the app -from whom sprung forth the original message way back in paragraph three. -When it arrives at the network adapter of its ancestors, the adapter -tells unix, unix tells libuv, libuv tells vere, and vere sends a `%hear` -kiss to ames. Once more into the kernel. - -The `%hear` kiss is handled in `++knob` as before, leading to `++gnaw`, -going over to `++chew`, `++apse`, `++chow`, and eventualy to `++dine`. -We've seen most of the cases in `++dine`, but we haven't yet looked at -the handling of this `%buck` meal. - - %buck - =. +> ?.(=(%full aut) +> cock) :: finish key exch - +>(..la (tock p.fud q.fud r.fud)) - -We send a packet level acknowledgment if we're finishing a key exchange, -else we call `++tock` to process the acknowledgment. - -This will get a little involved, so if you don't much care about how -exactly an acknowledgment happens, just know that the result gets gifted -as a `%woot` card back to the app who sent it. For those brave souls who -wish to see this thing through to the end, it's once more into the -breach. - - ++ tock :: tock:ho:um:am - |= [cop=coop fap=flap cot=@dr] :: e2e ack by hash - ^+ +> - =^ yoh puz (bick:puz now fap) - =. +>.$ - ?~ p.yoh +>.$ - =^ hud +>.$ - (done p.u.p.yoh q.u.p.yoh) - ?~ hud +>.$ - %= +>.$ - bin - :_ bin - `boon`[%cake [our her] [[p:sen:gus clon:diz] u.p.yoh] cop u.hud] - == - (busk xong:diz q.yoh) - -We're going to work through this one a little backwards since it's -mostly fairly simple except the call to `++bick:pu`. In fact, we'll just -skip `++bick` for the moment and finish the rest. - -If `++bick` succesfully acks the message, then we call `++done`. - - ++ done :: done:ho:um:am - |= [cha=path num=@ud] :: complete outgoing - ^- [(unit duct) _+>] - =+ rol=(need (~(get by ryl.bah) cha)) - =+ rix=(~(get by san.rol) num) - ?~ rix [~ +>.$] - :- rix - %_ +>.$ - ryl.bah - (~(put by ryl.bah) cha rol(san (~(del by san.rol) num))) - == - -This very simply gets the rill (the outgoing counterpart to a race, if -you recall), pulls out of the map of outstanding messages the duct over -which the original message was sent, and produces this duct while -deleting that entry from the map of outstanding messages. - -Going back to `++tock`, we now have the duct we need to return the -result over. We do the very sensible thing and put a `%cake` boon in the -queue to be processed later by `++clop`. - -In `q.yoh` we have a list of messages that may need to be sent, which we -pass to `++busk` to send, as usual. When an acknowledgment arrives, that -may trigger other messages immediately. This often happens when sending -more messages than the width of the logical window since for congestion -control reasons another message cannot be sent until some of the earlier -ones have been acknowledged. - -We'll look at the processing of the `%cake` boon in `++clop` before we -get back to talking about `++bick`. 
- - %cake - :_ fox - :~ [s.bon %give %woot q.p.bon r.bon] - == - -We very simply give, along the duct we found above, a `%woot` card with -the ship who sent us the ack and the ack itself. This allows the -application to decide what to do about the result. In case of a failure, -we usually either resend the message or display it to the user. -Sometimes, we recognize the error term and handle it internally. In any -case, the decision of how to handle the acknowledgment is entirely up to -the application. Our job is done. - -Well, except that we skipped `++bick:pu`. Let's go back to that. - - ++ bick :: bick:pu - |= [now=@da fap=flap] :: ack by hash - ^- [[p=(unit soup) q=(list rock)] _+>] - =+ sun=(~(get by diq) fap) - ?~ sun - [[~ ~] +>.$] - =. diq (~(del by diq) fap) - =^ gub +>.$ (bock now u.sun) - =^ yop +>.$ (harv now) - [[gub yop] +>.$] - -If you recall, in `++whap:pu` we created the packet pump's -representation of the message, which included putting the message into -`diq`, which maps from packet hashes to packet sequence numbers. Thus, -`u.sun` is the sequence number of this particular message. - -We delete this message from `diq` since we have now received an ack for -it. We call `++bock` to perform the ack by sequence number. We call -`++harv` to harvest the packet queue, sending any messages that are now -able to be sent. - -In `++bock`, there are three arms we haven't seen before: `++bine`, -`+wept`, and `++beet`. We'll describe each of these before we get to -`++bock`. `++bine` looks scariest. - - ++ bine :: bine:pu - |= [now=@da num=@ud] :: apply ack - ^- [(unit soup) _+>] - ?~ puq !! - ?. =(num p.n.puq) - ?: (gth num p.n.puq) - =+ lef=$(puq l.puq) - [-.lef +.lef(puq [n.puq puq.lef r.puq])] - =+ rig=$(puq r.puq) - [-.rig +.rig(puq [n.puq l.puq puq.rig])] - =: rtt ?. &(liv.q.n.puq =(1 nux.q.n.puq)) rtt - =+ gap=(sub now lys.q.n.puq) - :: ~& [%bock-trip num (div gap (div ~s1 1.000))] - (div (add (mul 2 rtt) gap) 3) - nif (sub nif !liv.q.n.puq) - == - =+ lez=(dec (need (~(get by pyz) gom.q.n.puq))) - =^ gub pyz - ?: =(0 lez) - [[~ gom.q.n.puq] (~(del by pyz) gom.q.n.puq)] - [~ (~(put by pyz) gom.q.n.puq lez)] - :- gub - +>.$(puq ~(nap to puq)) - -The first few lines are simply looking through the packet queue until we -find the correct packet to ack. This is basic queue manipulation that -operates directly on the treap structure of the queue. If you understand -treap queues, the logic is easy to follow. Otherwise, just trust us that -by the time we get to the `=:`, the packet with sequence number `num` is -on the top of the packet queue (that is, at `n.puq`). - -We first update the round-trip time. If the packet is either not alive -or had to be transmitted more than once, then we don't have any reliable -way of calculating the round-trip time since we're unsure of exactly -which transmission was acknowledged. Otherwise, the round-trip time is -the difference between now and when the packet was last sent. We set -`rtt` by a little weighted average where the previous smoothed RTT is -weighted twice as much as the RTT of the current packet. Thus, -`(2*rtt+gap)/3`. This gives us a nice smooth RTT that is somewhat -resilient to outlier data while still being responsive to our -ever-changing world. - -If the packet wasn't already dead, then we decrement the number of live -packets, which may allow more packets to be sent. - -We decrement the number of unacknowledged packets in our `pyz` for this -particular message. 
If you recall, this was set in `++whap` to the -number of packets required to send a message. - -If that was the last packet in the messge that needed to be acked, then -we delete the messgae reference from `pyz` and produce the id of the -message. Otherwise, we simply update `pyz` with the new number of -unacked messages. In either case, we remove the packet from the packet -queue. - - ++ wept :: wept:pu - |= [fip=@ud lap=@ud] :: fip thru lap-1 - =< abet =< apse - |% - ++ abet +>.$ - ++ apse - ^+ . - ?~ puq . - ?: (lth p.n.puq fip) ?~(l.puq . left) - ?: (gte p.n.puq lap) ?~(r.puq . rigt) - => rigt =< left - ?> ?=(^ puq) - ?.(liv.q.n.puq . .(nif (dec nif), liv.q.n.puq |)) - :: - ++ left - ?> ?=(^ puq) - ^+(. =+(lef=apse(puq l.puq) lef(puq [n.puq puq.lef r.puq]))) - ++ rigt - ?> ?=(^ puq) - ^+(. =+(rig=apse(puq r.puq) rig(puq [n.puq l.puq puq.rig]))) - -- - -The algorithm is a simple case of traversing the packet queue. -Essentialy, we mark as dead all packets in the queue between `fip` and -`(dec lap)`. We also update `nif`, the number of live packets. Lest you -mourn too much the passing of these packets, know that they shall soon -rise again. Recall that in `++bick` after the call to `++bock` we call -`++harv`. This will resend the packets that have just been labeled dead. - - ++ beet :: beet:pu - ^+ . :: advance unacked - =- +(nep ?~(foh nus u.foh)) - ^= foh - |- ^- (unit ,@ud) - ?~ puq ~ - ?: (lte p.n.puq nep) $(puq l.puq) - =+ rig=$(puq r.puq) - ?^(rig rig [~ p.n.puq]) - -Here we search for the next expected packet number. Basically, we search -the queue for the leftmost packet whose number is greater than the -current `nep`. If we don't find any such packet, we just use the total -number of packets sent. - -We can now dive into `++bock`, our last arm. - - ++ bock :: bock:pu - |= [now=@da num=@ud] :: ack by sequence - ^- [(unit soup) _+>] - =^ gym +> (bine now num) - :- gym - ?: (gth num nep) - =+ cam=(max 2 (div caw 2)) - :: ~& [%bock-hole num nep cam] - beet:(wept(nep num, cag cam, caw cam) nep num) - =. caw ?: (lth caw cag) +(caw) - (add caw !=(0 (mod (mug now) caw))) - ?: =(num nep) - :: ~& [%bock-fine num nif caw cag] - beet - :: ~& [%bock-fill num nif caw cag] - +>.$ - -First, we call `++bine` to apply the ack to the packet pump information. -We produce `gym`, which, if it exists, is the id of the packet that was -acked. If we received an ack for a packet later than the one we -expected, then we halve the logical packet window and kill all the -earlier packets so that they may be resent. - -Otherwise, we possibly increase the congestion window. If the window is -less than the congestion threshold, then we increment the size of the -window. Otherwise, we only increment one out of every `caw` times. - -If we received an ack for the packet we expected, then we simply advance -`nep` with `++beet`. If we received an ack for a packet earlier than we -expected, we do nothing. - -It may be hard to believe, but we are, in fact, done. The message has -been sent, received, acknowledged, and the acknowledgment has been -returned to the original sender. We hope it's clear that, while the -process has been somewhat involved, the algorithms are not all that -complicated. If you've read this far, you know `%ames`. The only other -code involves initialization, timeouts, and the like. - -Below, we give detailed reference documentation for the data models -involved. 
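Before that reference material, here is a rough recap of the window and timing logic from `++bock`, `++wept`, and `++beet` described above, written as a Python sketch. It is a reader's illustration only, not code from `%ames`, and the starting values are assumptions.

    import random

    class PumpSketch:
        """Reader's sketch of the congestion logic described above; not ames code."""

        def __init__(self):
            self.rtt = 0.6   # smoothed round-trip time in seconds (assumed start value)
            self.caw = 2     # logical window: packets allowed in flight (assumed start)
            self.cag = 64    # congestion threshold (assumed start value)
            self.nep = 0     # next expected packet number

        def on_rtt_sample(self, gap):
            # only taken for packets that were live and sent exactly once;
            # the old smoothed value is weighted twice as heavily as the sample
            self.rtt = (2 * self.rtt + gap) / 3

        def on_ack(self, num):
            if num > self.nep:
                # ack skipped ahead of what we expected: halve the window and
                # mark the earlier packets dead so they will be resent
                cam = max(2, self.caw // 2)
                self.caw = self.cag = cam
                self.mark_dead(self.nep, num)   # like ++wept: kill nep .. num-1
                self.nep = num + 1              # like ++beet, much simplified
            else:
                # grow the window: quickly below the threshold, then roughly
                # once every caw acks above it
                if self.caw < self.cag:
                    self.caw += 1
                elif random.randrange(self.caw) == 0:
                    self.caw += 1
                if num == self.nep:
                    self.nep += 1               # the expected packet: advance

        def mark_dead(self, lo, hi):
            """Placeholder for the packet-queue walk done by ++wept."""
            pass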
- -Data Models ------------ - -### `++fort`, formal state - - ++ fort :: formal state - $: %0 :: version - gad=duct :: client interface - hop=@da :: network boot date - ton=town :: security - zac=(map ship corn) :: flows by server - == :: - -This is the state of our vane. Anything that must be remembered between -calls to ames must be stored in this state. - -`%0` is the version of the ames state model itself. If the data model -`++fort` changes, then this number needs to be incremented, and an -adapter must be written to upgrade the old state into the new state. -Note that this is the version number of the model itself, not the -contents. When the data changes, there is of course no need to change -this. - -`gad` is a `duct` over which we send `%send` cards to unix. This card is -initialized when unix sends a `%barn` card as vere starts up. Vere -treats this duct specially -- don't send anything weird over it. - -`hop` is the network boot date. This is set when the `%kick` card is -sent by vere on start up. - -`ton` is a `++town`, where we store all of our security/encryption -state. Note that this is shared across all ships on a pier. - -`zac` is a map of ships to `++corn`. This stores all the per-ship state. -The keys to this map are the ships on the current pier. - -### `++town`, all security state - - ++ town :: all security state - $: lit=@ud :: imperial modulus - any=@ :: entropy - urb=(map ship sufi) :: all keys and routes - fak=? :: - == :: - -This is the security state of our pier. - -`lit` is unused. - -`any` is 256 bits of entropy. This entropy is used and updated in -exactly two places: when we send a `%junk` card, and when we generate a -new symmetric key in `++griz:lax:as:go`. When it is updated, it is -updated by a SHA-256 hash of the current time and the old value of the -entropy. - -`urb` is a map of ships to `++sufi`. This is where we store all the -per-ship state for the pier. The keys to this map are the ships on the -current pier. - -`fak` is true if we are on a fake network. This disables certain -security checks so that anyone may run a fake `~zod`. This is used only -for development. To use, run vere with the `-F` option (and the -`-I ~zod` option for a fake `~zod`). - -### `++sufi`, domestic host - - ++ sufi :: domestic host - $: hoy=(list ship) :: hierarchy - val=wund :: private keys - law=will :: server will - seh=(map hand ,[p=ship q=@da]) :: key cache - hoc=(map ship dore) :: neighborhood - == :: - -This is the security state of a domestic server. - -`hoy` is a list of the ships directly above us in the hierarchy of -ships. For example, for `~hoclur-bicrel`, this would be `~tasruc` and -`~tug`. See `++sein`. - -`val` is a list of our private keys. - -`law` is our certificate, which is a list of the XXX - -`seh` - -`hoc` is a map of ships to `++dore`. The stores all the security -informatoin about foreign ships. The keys to this map are the neighbors -(ships we have been in contact with) of this domestic server. - -### `++wund`, private keys - - ++ wund (list ,[p=life q=ring r=acru]) :: mace in action - -This is a list of our own private keys, indexed by life. The key itself -is the `++ring`, and the `++acru` is the encryption engine. We generate -the `++acru` from the private key by calling `++weur`. Thus, we can at -any time regenerate our `++wund` from a `++mace`. The current crypto is -at the head of the list and can be accessed with `++sen:as:go`. - -### `++ring`, private key - - ++ ring ,@ :: private key - -This is a private key. 
The first byte is reserved to identify the type of
cryptography: the case of the letter distinguishes a private key from a
public key, and the letter itself identifies which `++acru` to use.

### `++pass`, public key

    ++  pass  ,@                                          ::  public key

This is a public key. The first byte is reserved to identify the type of
cryptography: the case of the letter distinguishes a public key from a
private key, and the letter itself identifies which `++acru` to use.

### `++mace`, private secrets

    ++  mace  (list ,[p=life q=ring])                     ::  private secrets

This is a list of our private keys, indexed by life. From this we can
generate a `++wund` for actual use.

### `++skin`, encoding stem

    ++  skin  ?(%none %open %fast %full)                  ::  encoding stem

This defines the type of encryption used for each message. `%none`
refers to messages sent in the clear, `%open` refers to signed messages,
`%full` refers to sealed messages, and `%fast` refers to symmetrically
encrypted messages. See `++acru` for details.

### `++acru`, asymmetric cryptosuite

    ++  acru                                              ::  asym cryptosuite
      $_  ^?  |%                                          ::  opaque object
      ++  as  ^?                                          ::  asym ops
        |%  ++  seal  |=([a=pass b=@ c=@] _@)             ::  encrypt to a
            ++  sign  |=([a=@ b=@] _@)                    ::  certify as us
            ++  sure  |=([a=@ b=@] *(unit ,@))            ::  authenticate from us
            ++  tear  |=  [a=pass b=@]                    ::  accept from a
                      *(unit ,[p=@ q=@])                  ::
        --                                                ::
      ++  de  |+([a=@ b=@] *(unit ,@))                    ::  symmetric de, soft
      ++  dy  |+([a=@ b=@] _@)                            ::  symmetric de, hard
      ++  en  |+([a=@ b=@] _@)                            ::  symmetric en
      ++  ex  ^?                                          ::  export
        |%  ++  fig  _@uvH                                ::  fingerprint
            ++  pac  _@uvG                                ::  default passcode
            ++  pub  *pass                                ::  public key
            ++  sec  *ring                                ::  private key
        --
      ++  nu  ^?                                          ::  reconstructors
        |%  ++  pit  |=([a=@ b=@] ^?(..nu))               ::  from [width seed]
            ++  nol  |=(a=@ ^?(..nu))                     ::  from naked ring
            ++  com  |=(a=@ ^?(..nu))                     ::  from naked pass
        --
      --

This is an opaque interface for a general asymmetric cryptosuite. Any
form of asymmetric cryptography can be dropped in to be used instead of
the default. Right now, there are two cryptosuites: `++crua`, which is
your standard RSA, and `++crub`, which is elliptic curve crypto but is
mostly stubbed out at the moment.

#### `++as:acru`, asymmetric operations

    ++  as  ^?                                            ::  asym ops
      |%  ++  seal  |=([a=pass b=@ c=@] _@)               ::  encrypt to a
          ++  sign  |=([a=@ b=@] _@)                      ::  certify as us
          ++  sure  |=([a=@ b=@] *(unit ,@))              ::  authenticate from us
          ++  tear  |=  [a=pass b=@]                      ::  accept from a
                    *(unit ,[p=@ q=@])                    ::
      --                                                  ::

This is the core that defines the standard asymmetric cryptography
operations.

`++seal:as:acru` allows us to send a message encrypted with someone's
public key so that only they may read it. If Alice seals a message with
Bob's public key, then she can be sure that Bob is the only one who can
read it. This is associated with the `++skin` `%full`.

`++sign:as:acru` allows us to sign a message with our private key so
that others can verify that we sent the message. If Alice signs a
message with her private key, then Bob can verify with her public key
that it was indeed Alice who sent it. This is associated with the
`++skin` `%open`.

`++sure:as:acru` is the dual to `++sign:as:acru`. It allows us to verify
that a message we have received is indeed from the claimed sender. If
Alice sends a message signed with her private key, then Bob can use this
arm to verify that it was indeed Alice who sent it. This is associated
with the `++skin` `%open`.

`++tear:as:acru` is the dual to `++seal:as:acru`. It allows us to read a
message that we can be sure is only read by us.
If Alice seals a message -with Bob's public key, then Bob can use this arm to read it. This is -associated with the `++skin` `%full`. - -#### `++de:acru`, `++dy:acru`, and `++en:acru`, symmetric encryption/decryption - - ++ de |+([a=@ b=@] *(unit ,@)) :: symmetric de, soft - ++ dy |+([a=@ b=@] _@) :: symmetric de, hard - ++ en |+([a=@ b=@] _@) :: symmetric en - -Symmetric encryption is associated with the `++skin` `%fast`. - -`++de:acru` decrypts a message with a symmetric key, returning `~` on -failure and `[~ u=data]` on success. - -`++dy:acru` decrypts a message with a symmetric key, crashing on -failure. This should almost always be defined as, and should always be -semantically equivalent to, `(need (de a b))`. - -`++en:acru` encrypts a message with a symmetric key. - -#### `++ex:acru`, exporting data - - ++ ex ^? :: export - |% ++ fig _@uvH :: fingerprint - ++ pac _@uvG :: default passcode - ++ pub *pass :: public key - ++ sec *ring :: private key - -- - -`++fig:ex:acru` is our fingerprint, usually a hash of our public key. -This is used, for example, in `++zeno`, where every carrier owner's -fingerprint is stored so that we can ensure that carriers are indeed -owned by their owners - -`++pac:ex:acru` is our default passcode, which is unused at present. - -`++pub:ex:acru` is the `++pass` form of our public key. - -`++sec:ex:acru` is the `++ring` form of our private key. - -#### `++nu:acru`, reconstructors - - ++ nu ^? :: reconstructors - |% ++ pit |=([a=@ b=@] ^?(..nu)) :: from [width seed] - ++ nol |=(a=@ ^?(..nu)) :: from naked ring - ++ com |=(a=@ ^?(..nu)) :: from naked pass - -- - -These arms allow us to reconstruct a `++acru` from basic data. - -`++pit:nu:acru` constructs a `++acru` from the width of our intended key -and seed entropy. This is usually used in the initial construction of -the `++acru`. - -`++nol:nu:acru` constructs a `++acru` from a "naked ring", meaning a -`++ring` without the initial byte identifying the type of crypto. There -is often a helper arm that that wraps this; see `++weur` for `++crua` -and `++wear` for `++crub`. - -`++com:nu:acru` constructs a `++acru` from a "naked pass", meaning a -`++ring` without the initial byte identifying the type of crypto. There -is often a helper arm that that wraps this; see `++haul` for `++crua` -and `++hail` for `++crub`. - -### `++will`, certificate - - ++ will (list deed) :: certificate - -This is a list of deeds associated with the current ship. There should -be an item in this list for every ship from this point up in the -hierarchy times the number of lives that each ship has had. For example, -\~hoclur-bicrel may have a will with three items: one for itself, one -for \~tasruc (who issued \~hoclur-bicrel's deed) and one for \~tug (who -issued \~tasruc's deed). - -### `++deed`, identity - - ++ deed ,[p=@ q=step r=?] :: sig, stage, fake? - -`p` is the signature of a particular deed, which is a signed copy of -`q`. - -`q` is the stage in the identity. - -`r` is true if we're working on a fake network, where we don't check -that the carrier fingerprints are correct. This allows us to create fake -networks for development without interfering with the real network. - -### `++step`, identity stage - - ++ step ,[p=bray q=gens r=pass] :: identity stage - -This is a single stage in our identity. Thus, this is specific to a -single life in a single ship. Everything in here may change between -lives. - -`p` - -`q` - -`r` is the public key for this stage in the identity. 
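To summarize the `++acru` shape described above in more familiar terms, here is a rough Python rendering of the same interface. The method names mirror the Hoon arms, the argument and return types are simplified assumptions (plain integers for atoms), and `++nu` and `++pac` are omitted; this is only an illustration, not how the suite is actually built.

    from abc import ABC, abstractmethod
    from typing import Optional, Tuple

    class Acru(ABC):
        """Illustrative rendering of the ++acru interface; not kernel code."""

        # ++as: asymmetric operations
        @abstractmethod
        def seal(self, their_pass: int, key: int, msg: int) -> int: ...   # encrypt to them
        @abstractmethod
        def sign(self, key: int, msg: int) -> int: ...                    # certify as us
        @abstractmethod
        def sure(self, key: int, msg: int) -> Optional[int]: ...          # check our signature
        @abstractmethod
        def tear(self, their_pass: int, msg: int) -> Optional[Tuple[int, int]]: ...

        # symmetric operations
        @abstractmethod
        def de(self, key: int, msg: int) -> Optional[int]: ...            # soft decrypt
        @abstractmethod
        def en(self, key: int, msg: int) -> int: ...                      # encrypt

        def dy(self, key: int, msg: int) -> int:
            # hard decrypt: must behave like (need (de a b))
            out = self.de(key, msg)
            if out is None:
                raise ValueError("symmetric decryption failed")
            return out

        # ++ex: exported data
        @abstractmethod
        def fig(self) -> int: ...   # fingerprint
        @abstractmethod
        def pub(self) -> int: ...   # public key, as a pass
        @abstractmethod
        def sec(self) -> int: ...   # private key, as a ring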
### `++bray`

    ++  bray  ,[p=life q=(unit life) r=ship s=@da]        ::  our parent us now

XXX

### `++gens`, general identity

    ++  gens  ,[p=lang q=gcos]                            ::  general identity

`p` is the IETF language code for the preferred language of this
identity. This is unused at the moment, but in the future text should be
localized based on this.

`q` is the description of the ship.

### `++gcos`, identity description

    ++  gcos                                              ::  id description
      $%  [%czar ~]                                       ::  8-bit ship
          [%duke p=what]                                  ::  32-bit ship
          [%earl p=@t]                                    ::  64-bit ship
          [%king p=@t]                                    ::  16-bit ship
          [%pawn p=(unit ,@t)]                            ::  128-bit ship
      ==                                                  ::

This is the description of the identity of a ship. Most types of
identity have a `@t` field, which is their human-readable name. The
identity of a `%duke` is more involved.

A `%czar`, a carrier, is a ship with an 8-bit address. Thus, there are
only 256 carriers. These are at the top of the namespace hierarchy, and
the fingerprint of each carrier is stored in `++zeno`. These are the
"senators" of Urbit.

A `%king`, a cruiser, is a ship with a 16-bit address. Thus, there are
65,536 cruisers. Each carrier may issue 256 cruisers. These are the
infrastructure of Urbit.

A `%duke`, a destroyer, is a ship with a 32-bit address. Thus, there are
4,294,967,296 destroyers. Each cruiser may issue 65,536 destroyers.
These are the individuals of Urbit.

An `%earl`, a yacht, is a ship with a 64-bit address. Thus, there are
18,446,744,073,709,551,616 yachts. Each destroyer may issue
4,294,967,296 yachts. These are the devices of Urbit.

A `%pawn`, a submarine, is a ship with a 128-bit address. Thus, there
are a lot of submarines. The chance of random name collision is
negligible, so submarines are not issued by any ship. They must simply
assert their presence, and they are all considered children of \~zod.
This is the underworld of Urbit, where anonymity reigns supreme.

### `++what`, logical destroyer identity

    ++  what                                              ::  logical identity
      $%  [%anon ~]                                       ::  anonymous
          [%lady p=whom]                                  ::  female person ()
          [%lord p=whom]                                  ::  male person []
          [%punk p=sect q=@t]                             ::  opaque handle ""
      ==                                                  ::

This is the logical identity of a destroyer.

A `%anon` is a completely anonymous destroyer. The difference between
this and a submarine is that a submarine is ephemeral while a `%anon`
destroyer is not. Thus, we may not know who \~hoclur-bicrel is, but we
do know that it's always the same person.

A `%lady` is a female person. The name used here should be a real name.

A `%lord` is a male person. The name used here should be a real name.

A `%punk` is a person who is identified only by a handle.

### `++whom`, real person

    ++  whom  ,[p=@ud q=govt r=sect s=name]               ::  year/govt/id

This is the information associated with a real person. It is mostly
information that could be observed with the briefest of interactions.

`p` is the birth year.

`q` is the location of a user, usually of the form "country/zip".

`r` is the sect of the user.

`s` is the real name of the person.

### `++govt`

    ++  govt  path                                        ::  country/postcode

This is the location of the user, usually of the form "country/zip".

### `++sect`

    ++  sect  ?(%black %blue %red %orange %white)         ::  banner

XXX

### `++name`

    ++  name  ,[p=@t q=(unit ,@t) r=(unit ,@t) s=@t]      ::  first mid/nick last

This is the given name, possible middle name/initial, possible nickname,
and surname of a user.
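The address-width rules above are easy to state as code. Here is a small, hypothetical Python helper that classifies a ship by the bit-width of its address; it is only an illustration of the table above, not kernel code.

    def ship_class(ship: int) -> str:
        """Classify a ship by the bit-width of its address, per the table above."""
        if ship < 1 << 8:
            return "czar"   # carrier, 8-bit
        if ship < 1 << 16:
            return "king"   # cruiser, 16-bit
        if ship < 1 << 32:
            return "duke"   # destroyer, 32-bit
        if ship < 1 << 64:
            return "earl"   # yacht, 64-bit
        return "pawn"       # submarine, 128-bit

    # ship_class(0xff) == "czar"; ship_class(0x1000) == "king"; ship_class(2 ** 40) == "earl"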
diff --git a/pub/doc/arvo/arvo.md b/pub/doc/arvo/arvo.md deleted file mode 100644 index 263c885728..0000000000 --- a/pub/doc/arvo/arvo.md +++ /dev/null @@ -1,8 +0,0 @@ -
- -`%arvo` -======= - -Our operating system. - -
diff --git a/pub/doc/arvo/arvo/commentary.md b/pub/doc/arvo/arvo/commentary.md deleted file mode 100644 index 361c436295..0000000000 --- a/pub/doc/arvo/arvo/commentary.md +++ /dev/null @@ -1,4 +0,0 @@ -`%arvo` commentary -================== - -`%arvo` is our operating system. diff --git a/pub/doc/arvo/clay.md b/pub/doc/arvo/clay.md deleted file mode 100644 index a80fb1c7f5..0000000000 --- a/pub/doc/arvo/clay.md +++ /dev/null @@ -1,27 +0,0 @@ -
`%clay`
=======

Our filesystem.

`%clay` is version-controlled, referentially transparent, and global.
While this filesystem is stored in `%clay`, it is mirrored to Unix for
convenience. Unix tells `%clay` whenever a file changes in the Unix
copy of the filesystem so that the change may be applied. `%clay` tells
Unix whenever an app or vane changes the filesystem so that the change
can be effected in Unix. Apps and vanes may use `%clay` to write to the
filesystem, query it, and subscribe to changes in it. `%ford` and
`%gall` use `%clay` to serve up apps and web pages.

`%clay` includes three components. First is the filesystem/version
control algorithms, which are mostly defined in `++ze` and `++zu` in
zuse. Second is the write, query, and subscription logic. Finally, there
is the logic for communicating requests to, and receiving requests from,
foreign ships.
- ------------------------------------------------------------------------- - - diff --git a/pub/doc/arvo/clay/architecture.md b/pub/doc/arvo/clay/architecture.md deleted file mode 100644 index d27ab0f34a..0000000000 --- a/pub/doc/arvo/clay/architecture.md +++ /dev/null @@ -1,415 +0,0 @@ -# clay - -## high-level - -clay is the primary filesystem for the arvo operating system, -which is the core of an urbit. The architecture of clay is -intrinsically connected with arvo, but we assume no knowledge of -either arvo or urbit. We will point out only those features of -arvo that are necessary for an understanding of clay, and we will -do so only when they arise. - -The first relevant feature of arvo is that it is a deterministic -system where input and output are defined as a series of events -and effects. The state of arvo is simply a function of its event -log. None of the effects from an event are emitted until the -event is entered in the log and persisted, either to disk or -another trusted source of persistence, such as a Kafka cluster. -Consequently, arvo is a single-level store: everything in its -state is persistent. - -In a more traditional OS, everything in RAM can be erased at any -time by power failure, and is always erased on reboot. Thus, a -primary purpose of a filesystem is to ensure files persist across -power failures and reboots. In arvo, both power failures and -reboots are special cases of suspending computation, which is -done safely since our event log is already persistent. Therefore, -clay is not needed in arvo for persistence. Why, then, do we have a -filesystem? There are two answers to this question. - -First, clay provides a filesystem tree, which is a convenient -user interface for some applications. Unix has the useful concept -of virtual filesystems, which are used for everything from direct -access to devices, to random number generators, to the /proc -tree. It is easy and intuitive to read from and write to a -filesystem tree. - -Second, clay has a distributed revision control system baked into -it. Traditional filesystems are not revision controlled, so -userspace software -- such as git -- is written on top of them to -do so. clay natively provides the same functionality as modern -DVCSes, and more. - -clay has two other unique properties that we'll cover later on: -it supports typed data and is referentially transparent. - -### Revision Control - -Every urbit has one or more "desks", which are independently -revision-controlled branches. Each desk contains its own mark -definitions, apps, doc, and so forth. - -Traditionally, an urbit has at least a base and a home desk. The -base desk has all the system software from the distribution. the -home desk is a fork of base with all the stuff specific to the -user of the urbit. - -A desk is a series of numbered commits, the most recent of which -represents the current state of the desk. A commit is composed of -(1) an absolute time when it was created, (2) a list of zero or -more parents, and (3) a map from paths to data. - -Most commits have exactly one parent, but the initial commit on a -desk may have zero parents, and merge commits have more than one -parent. - -The non-meta data is stored in the map of paths to data. It's -worth noting that no constraints are put on this map, so, for -example, both /a/b and /a/b/c could have data. This is impossible -in a traditional Unix filesystem since it means that /a/b is both -a file and a directory. 
Conventionally, the final element in the -path is its mark -- much like a filename extension in Unix. Thus, -/doc/readme.md in Unix is stored as /doc/readme/md in urbit. - -The data is not stored directly in the map; rather, a hash of the -data is stored, and we maintain a master blob store. Thus, if the -same data is referred to in multiple commits (as, for example, -when a file doesn't change between commits), only the hash is -duplicated. - -In the master blob store, we either store the data directly, or -else we store a diff against another blob. The hash is dependent -only on the data within and not on whether or not it's stored -directly, so we may on occasion rearrange the contents of the -blob store for performance reasons. - -Recall that a desk is a series of numbered commits. Not every -commit in a desk must be numbered. For example, if the base desk -has had 50 commits since home was forked from it, then a merge -from base to home will only add a single revision number to home, -although the full commit history will be accessible by traversing -the parentage of the individual commits. - -We do guarantee that the first commit is numbered 1, commits are -numbered consecutively after that (i.e. there are no "holes"), -the topmost commit is always numbered, and every numbered commit -is an ancestor of every later numbered commit. - -There are three ways to refer to particular commits in the -revision history. Firstly, one can use the revision number. -Secondly, one can use any absolute time between the one numbered -commit and the next (inclusive of the first, exclusive of the -second). Thirdly, every desk has a map of labels to revision -numbers. These labels may be used to refer to specific commits. - -Additionally, clay is a global filesystem, so data on other urbit -is easily accessible the same way as data on our local urbit. In -general, the path to a particular revision of a desk is -/~urbit-name/desk-name/revision. Thus, to get /try/readme/md -from revision 5 of the home desk on ~sampel-sipnym, we refer to -/~sampel-sipnym/home/5/try/readme/md. Clay's namespace is thus -global and referentially transparent. - -XXX reactivity here? - -### A Typed Filesystem - -Since clay is a general filesystem for storing data of arbitrary -types, in order to revision control correctly it needs to be -aware of types all the way through. Traditional revision control -does an excellent job of handling source code, so for source code -we act very similar to traditional revision control. The -challenge is to handle other data similarly well. - -For example, modern VCSs generally support "binary files", which -are files for which the standard textual diffing, patching, and -merging algorithms are not helpful. A "diff" of two binary files -is just a pair of the files, "patching" this diff is just -replacing the old file with the new one, and "merging" -non-identical diffs is always a conflict, which can't even be -helpfully annotated. Without knowing anything about the structure -of a blob of data, this is the best we can do. - -Often, though, "binary" files have some internal structure, and -it is possible to create diff, patch, and merge algorithms that -take advantage of this structure. An image may be the result of a -base image with some set of operations applied. 
With algorithms -aware of this set of operations, not only can revision control -software save space by not having to save every revision of the -image individually, these transformations can be made on parallel -branches and merged at will. - -Suppose Alice is tasked with touching up a picture, improving the -color balance, adjusting the contrast, and so forth, while Bob -has the job of cropping the picture to fit where it's needed and -adding textual overlay. Without type-aware revision control, -these changes must be made serially, requiring Alice and Bob to -explicitly coordinate their efforts. With type-aware revision -control, these operations may be performed in parallel, and then -the two changesets can be merged programmatically. - -Of course, even some kinds of text files may be better served by -diff, patch, and merge algorithms aware of the structure of the -files. Consider a file containing a pretty-printed JSON object. -Small changes in the JSON object may result in rather significant -changes in how the object is pretty-printed (for example, by -addding an indentation level, splitting a single line into -multiple lines). - -A text file wrapped at 80 columns also reacts suboptimally with -unadorned Hunt-McIlroy diffs. A single word inserted in a -paragraph may push the final word or two of the line onto the -next line, and the entire rest of the paragraph may be flagged as -a change. Two diffs consisting of a single added word to -different sentences may be flagged as a conflict. In general, -prose should be diffed by sentence, not by line. - -As far as the author is aware, clay is the first generalized, -type-aware revision control system. We'll go into the workings -of this system in some detail. - -### Marks - -Central to a typed filesystem is the idea of types. In clay, we -call these "marks". A mark is a file that defines a type, -conversion routines to and from the mark, and diff, patch, and -merge routines. - -For example, a `%txt` mark may be a list of lines of text, and it -may include conversions to `%mime` to allow it to be serialized -and sent to a browswer or to the unix filesystem. It will also -include Hunt-McIlroy diff, patch, and merge algorithms. - -A `%json` mark would be defined as a json object in the code, and -it would have a parser to convert from `%txt` and a printer to -convert back to `%txt`. The diff, patch, and merge algorithms are -fairly straightforward for json, though they're very different -from the text ones. - -More formally, a mark is a core with three arms, `++grab`, -`++grow`, and `++grad`. In `++grab` is a series of functions to -convert from other marks to the given mark. In `++grow` is a -series of functions to convert from the given mark to other -marks. In `++grad` is `++diff`, `++pact`, `++join`, and `++mash`. - -The types are as follows, in an informal pseudocode: - - ++ grab: - ++ mime: -> - ++ txt: -> - ... - ++ grow: - ++ mime: -> - ++ txt: -> - ... - ++ grad - ++ diff: (, ) -> - ++ pact: (, ) -> - ++ join: (, ) -> or NULL - ++ mash: (, ) -> - -These types are basically what you would expect. Not every mark -has each of these functions defined -- all of them are optional -in the general case. - -In general, for a particular mark, the `++grab` and `++grow` entries -(if they exist) should be inverses of each other. - -In `++grad`, `++diff` takes two instances of a mark and produces -a diff of them. `++pact` takes an instance of a mark and patches -it with the given diff. 
`++join` takes two diffs and attempts to -merge them into a single diff. If there are conflicts, it -produces null. `++mash` takes two diffs and forces a merge, -annotating any conflicts. - -In general, if `++diff` called with A and B produces diff D, then -`++pact` called with A and D should produce B. Also, if `++join` -of two diffs does not produce null, then `++mash` of the same -diffs should produce the same result. - -Alternately, instead of `++diff`, `++pact`, `++join`, and -`++mash`, a mark can provide the same functionality by defining -`++sted` to be the name of another mark to which we wish to -delegate the revision control responsibilities. Then, before -running any of those functions, clay will convert to the other -mark, and convert back afterward. For example, the `%hoon` mark -is revision-controlled in the same way as `%txt`, so its `++grad` -is simply `++sted %txt`. Of course, `++txt` must be defined in -`++grow` and `++grab` as well. - -Every file in clay has a mark, and that mark must have a -fully-functioning `++grad`. Marks are used for more than just -clay, and other marks don't need a `++grad`, but if a piece of -data is to be saved to clay, we must know how to revision-control -it. - -Additionally, if a file is to be synced out to unix, then it must -have conversion routines to and from the `%mime` mark. - -##Using clay - -### Reading and Subscribing - -When reading from Clay, there are three types of requests. A -`%sing` request asks for data at single revsion. A `%next` -request asks to be notified the next time there's a change to -given file. A `%many` request asks to be notified on every -change in a desk for a range of changes. - -For `%sing` and `%next`, there are generally three things to be -queried. A `%u` request simply checks for the existence of a -file at a path. A `%x` request gets the data in the file at a -path. A `%y` request gets a hash of the data in the file at the -path combined with all its children and their data. Thus, `%y` -of a node changes if it or any of its children change. - -A `%sing` request is fulfilled immediately if possible. If the -requested revision is in the future, or is on another ship for -which we don't have the result cached, we don't respond -immediately. If the requested revision is in the future, we wait -until the revision happens before we respond to the request. If -the request is for data on another ship, we pass on the request -to the other ship. In general, Clay subscriptions, like most -things in Urbit, aren't guaranteed to return immediately. -They'll return when they can, and they'll do so in a -referentially transparent manner. - -A `%next` request checks query at the given revision, and it -produces the result of the query the next time it changes, along -with the revsion number when it changes. Thus, a `%next` of a -`%u` is triggered when a file is added or deleted, a `%next` of a -`%x` is triggered when a file is added, deleted, or changed, and -a `%next` of a `%y` is triggered when a file or any of its -children is added, deleted, or changed. - -A `%many` request is triggered every time the given desk has a -new revision. Unlike a `%next`, a `%many` has both a start and -an end revsion, after which it stops returning. For `%next`, a -single change is reported, and if the caller wishes to hear of -the next change, it must resubscribe. For `%many`, every revsion -from the start to the end triggers a response. 
Since a `%many` -request doesn't ask for any particular data, there aren't `%u`, -`%x`, and `%y` versions for it. - -### Unix sync - -One of the primary functions of clay is as a convenient user -interface. While tools exist to use clay from within urbit, it's -often useful to be able to treat clay like any other filesystem -from the Unix perspective -- to "mount" it, as it were. - -From urbit, you can run `|mount /path/to/directory %mount-point`, -and this will mount the given clay directory to the mount-point -directory in Unix. Every file is converted to `%mime` before it's -written to Unix, and converted back when read from Unix. The -entire directory is watched (a la Dropbox), and every change is -auto-committed to clay. - -### Merging - -Merging is a fundamental operation for a distributed revision -control system. At their root, clay's merges are similar to -git's, but with some additions to accomodate typed data. There -are seven different merge strategies. - -Throughout our discussion, we'll say that the merge is from -Alice's desk to Bob's. Recall that a commit is a date (for all -new commits this will be the current date), a list of parents, -and the data itself. - -A `%init` merge should be used iff it's the first commit to a -desk. The head of Alice's desk is used as the number 1 commit to -Bob's desk. Obviously, the ancestry remains intact through -traversing the parentage of the commit even though previous -commits are not numbered for Bob's desk. - -A `%this` merge means to keep what's in Bob's desk, but join the -ancestry. Thus, the new commit has the head of each desk as -parents, but the data is exactly what's in Bob's desk. For those -following along in git, this is the 'ours' merge strategy, not -the '--ours' option to the 'recursive' merge strategy. In other -words, even if Alice makes a change that does not conflict with -Bob, we throw it away. It's Bob's way or the highway. - -A `%that` merge means to take what's in Alice's desk, but join -the ancestry. This is the reverse of `%this`. - -A `%fine` merge is a "fast-forward" merge. This succeeds iff one -head is in the ancestry of the other. In this case, we use the -descendant as our new head. - -For `%meet`, `%mate`, and `%meld` merges, we first find the most -recent common ancestor to use as our merge base. If we have no -common ancestors, then we fail. If we have more than one most -recent common ancestor, then we have a criss-cross situation, -which should be handled delicately. At present, we delicately -throw up our hands and give up, but something akin to git's -'recursive' strategy should be implemented in the future. - -There's a functional inclusion ordering on `%fine`, `%meet`, -`%mate`, and `%meld` such that if an earlier strategy would have -succeeded, then every later strategy will produce the same -result. Put another way, every earlier strategy is the same as -every later strategy except with a restricted domain. - -A `%meet` merge only succeeds if the changes from the merge base -to Alice's head (hereafter, "Alice's changes") are in different -files than Bob's changes. In this case, the parents are both -Alice's and Bob's heads, and the data is the merge base plus -Alice's changed files plus Bob's changed files. - -A `%mate` merge attempts to merge changes to the same file when -both Alice and bob change it. If the merge is clean, we use it; -otherwise, we fail. A merge between different types of changes -- -for example, deleting a file vs changing it -- is always a -conflict. 
If we succeed, the parents are both Alice's and Bob's -heads, and the data is the merge base plus Alice's changed files -plus Bob's changed files plus the merged files. - -A `%meld` merge will succeed even if there are conflicts. If -there are conflicts in a file, then we use the merge base's -version of that file, and we produce a set of files with -conflicts. The parents are both Alice's and Bob's heads, and the -data is the merge base plus Alice's changed files plus Bob's -changed files plus the successfully merged files plus the merge -base's version of the conflicting files. - -That's the extent of the merge options in clay proper. In -userspace there's a final option `%auto`, which is the most -common. `%auto` checks to see if Bob's desk exists, and if it -doesn't we use a `%init` merge. Otherwise, we progressively try -`%fine`, `%meet`, and `%mate` until one succeeds. - -If none succeed, we merge Bob's desk into a scratch desk. Then, -we merge Alice's desk into the scratch desk with the `%meld` -option to force the merge. For each file in the produced set of -conflicting files, we call the `++mash` function for the -appropriate mark, which annotates the conflicts if we know how. - -Finally, we display a message to the user informing them of the -scratch desk's existence, which files have annotated conflicts, -and which files have unannotated conflicts. When the user has -resolved the conflicts, they can merge the scratch desk back into -Bob's desk. This will be a `%fine` merge since Bob's head is in -the ancestry of the scratch desk. - -### Autosync - -Tracking and staying in sync with another desk is another -fundamental operation. We call this "autosync". This doesn't mean -simply mirroring a desk, since that wouldn't allow local changes. -We simply want to apply changes as they are made upstream, as -long as there are no conflicts with local changes. - -This is implemented by watching the other desk, and, when it has -changes, merging these changes into our desk with the usual merge -strategies. - -Note that it's quite reasonable for two desks to be autosynced to -each other. This results in any change on one desk being mirrored -to the other and vice versa. - -Additionally, it's fine to set up an autosync even if one desk, -the other desk, or both desks do not exist. The sync will be -activated when the upstream desk comes into existence and will -create the downstream desk if needed. diff --git a/pub/doc/arvo/clay/commentary.md b/pub/doc/arvo/clay/commentary.md deleted file mode 100644 index daf8bc7dfb..0000000000 --- a/pub/doc/arvo/clay/commentary.md +++ /dev/null @@ -1,2007 +0,0 @@ -`%clay` commentary -================== - -`%clay` is our filesystem. - -The first part of this will be reference documentation for the data -types used by our filesystem. In fact, as a general guide, we recommend -reading and attempting to understand the data structures used in any -Hoon code before you try to read the code itself. Although complete -understanding of the data structures is impossible without seeing them -used in the code, an 80% understanding greatly clarifies the code. As -another general guide, when reading Hoon, it rarely pays off to -understand every line of code when it appears. Try to get the gist of -it, and then move on. The next time you come back to it, it'll likely -make a lot more sense. - -After a description of the data models, we'll give an overview of the -interface that vanes and applications can use to interact with the -filesystem. 
Finally, we'll dive into the code and the algorithms themselves. You
know, the fun part.

Data Models
-----------

As you're reading through this section, remember you can always come
back to it when you run into these types later on. You're not going to
remember everything the first time through, but it is worth reading, or
at least skimming, so that you get a rough idea of how our state is
organized.

The types that are certainly worth reading are `++raft`, `++room`,
`++dome`, `++ankh`, `++rung`, `++rang`, `++blob`, `++yaki`, and `++nori`
(possibly in that order). All in all, though, this section isn't too
long, so many readers may wish to quickly read through all of it. If you
get bored, though, just skip to the next section. You can always come
back when you need to.

### `++raft`, formal state

    ++  raft                                              ::  filesystem
      $:  fat=(map ship room)                             ::  domestic
          hoy=(map ship rung)                             ::  foreign
          ran=rang                                        ::  hashes
      ==                                                  ::

This is the state of our vane. Anything that must be remembered between
calls to clay is stored in this state.

`fat` is the set of domestic servers. This stores all the information
that is specific to a particular ship on this pier. The keys to this map
are the ships on the current pier.

`hoy` is the set of foreign servers. This stores all the information
that is specific to a particular foreign ship. The keys to this map are
all the ships whose filesystems we have attempted to access through
clay.

`ran` is the store of all commits and deltas, keyed by hash. This is
where all the "real" data we know is stored; the rest is "just
bookkeeping".

### `++room`, filesystem per domestic ship

    ++  room                                              ::  fs per ship
      $:  hun=duct                                        ::  terminal duct
          hez=(unit duct)                                 ::  sync duct
          dos=(map desk dojo)                             ::  native desk
      ==                                                  ::

This is the representation of the filesystem of a ship on our pier.

`hun` is the duct we use to send messages to dill to display
notifications of filesystem changes. Only `%note` gifts should be
produced along this duct. This is set by the `%init` kiss.

`hez`, if present, is the duct we use to send sync messages to unix so
that they end up in the pier unix directory. Only `%ergo` gifts should
be produced along this duct. This is set by `%into` and `%invo` kisses.

`dos` is a well-known operating system released in 1981. It is also the
set of desks on this ship, mapped to their data.

### `++desk`, filesystem branch

    ++  desk  ,@tas                                       ::  ship desk case spur

This is the name of a branch of the filesystem. The default desks are
"arvo", "main", and "try". More may be created by simply referencing
them. Desks have independent histories and states, and they may be
merged into each other.

### `++dojo`, domestic desk state

    ++  dojo  ,[p=cult q=dome]                            ::  domestic desk state

This is all the data that is specific to a particular desk on a
domestic ship. `p` is the set of subscribers to this desk and `q` is the
data in the desk.

### `++cult`, subscriptions

    ++  cult  (map duct rave)                             ::  subscriptions

This is the set of subscriptions to a particular desk. The keys are the
ducts from which the subscription requests came. The results will be
produced along these ducts. The values are a description of the
requested information.

### `++rave`, general subscription request

    ++  rave                                              ::  general request
      $%  [& p=mood]                                      ::  single request
          [| p=moat]                                      ::  change range
      ==                                                  ::

This represents a subscription request for a desk. The request can be
for either a single item in the desk or else for a range of changes on
the desk.
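Before continuing with the remaining request types, here is a rough Python sketch of how the state described above nests together, from `++raft` down to `++dojo`. The Python names and shapes are assumptions for illustration; the real definitions are the Hoon molds quoted above, and `Rave`, `Dome`, `Rung`, and `Rang` are left abstract here.

    from dataclasses import dataclass, field
    from typing import Dict, Optional, Tuple

    Duct = Tuple[str, ...]           # stand-in for a duct
    Rave = object                    # documented just above
    Dome = object                    # documented below
    Rung = object
    Rang = object

    Cult = Dict[Duct, Rave]          # subscriptions, keyed by requesting duct
    Dojo = Tuple[Cult, Dome]         # per-desk state: subscribers plus data

    @dataclass
    class Room:                      # filesystem of one domestic ship
        hun: Duct                                             # duct for dill notifications
        hez: Optional[Duct]                                   # duct for unix sync, if mounted
        dos: Dict[str, Dojo] = field(default_factory=dict)    # desks by name

    @dataclass
    class Raft:                      # the whole vane state
        fat: Dict[int, Room] = field(default_factory=dict)    # domestic ships
        hoy: Dict[int, Rung] = field(default_factory=dict)    # foreign ships
        ran: Optional[Rang] = None                            # hash-keyed data store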
### `++rove`, stored general subscription request

    ++  rove  (each mood moot)                            ::  stored request

When we store a request, we store subscriptions with a little extra
information so that we can determine whether new versions actually
affect the path we're subscribed to.

### `++mood`, single subscription request

    ++  mood  ,[p=care q=case r=path]                     ::  request in desk

This represents a request for the state of the desk at a particular
commit, specified by `q`. `p` specifies what kind of information is
desired, and `r` specifies the path we are requesting.

### `++moat`, range subscription request

    ++  moat  ,[p=case q=case r=path]                     ::  change range

This represents a request for all changes between `p` and `q` on path
`r`. You will be notified when a change is made to the node referenced
by the path or to any of its children.

### `++moot`, stored range subscription request

    ++  moot  ,[p=case q=case r=path s=(map path lobe)]   ::

This is just a `++moat` plus a map of paths to lobes. This map
represents the data at the node referenced by the path at case `p`, if
we've gotten to that case (else null). We only send a notification along
the subscription if the data at a new revision is different than it was.

### `++care`, clay submode

    ++  care  ?(%u %v %w %x %y %z)                        ::  clay submode

This specifies what type of information is requested in a subscription
or a scry.

`%u` requests the `++rang` at the current moment. Because this
information is not stored for any moment other than the present, we
crash if the `++case` is not a `%da` for the current date.

`%v` requests the `++dome` at the specified commit.

`%w` requests the revision number of the desk.

`%x` requests the file at a specified path at the specified commit. If
there is no node at that path or if the node has no contents (that is,
if `q:ankh` is null), then this produces null.

`%y` requests a `++arch` of the specified commit at the specified path.

`%z` requests the `++ankh` of the specified commit at the specified
path.

### `++arch`, shallow filesystem node

    ++  arch  ,[p=@uvI q=(unit ,@uvI) r=(map ,@ta ,~)]    ::  fundamental node

This is analogous to `++ankh` except that we have neither our contents
nor the ankhs of our children. The other fields are exactly the same, so
`p` is a hash of the associated ankh, `u.q`, if it exists, is a hash of
the contents of this node, and the keys of `r` are the names of our
children. `r` is a map to null rather than a set so that the ordering of
the map will be equivalent to that of `r:ankh`, allowing efficient
conversion.

### `++case`, specifying a commit

    ++  case                                              ::  ship desk case spur
      $%  [%da p=@da]                                     ::  date
          [%tas p=@tas]                                   ::  label
          [%ud p=@ud]                                     ::  number
      ==                                                  ::

A commit can be referred to in three ways: `%da` refers to the commit
that was at the head on date `p`, `%tas` refers to the commit labeled
`p`, and `%ud` refers to the commit numbered `p`. Note that since these
all can be reduced down to a `%ud`, only numbered commits may be
referenced with a `++case`.

### `++dome`, desk data

    ++  dome                                              ::  project state
      $:  ang=agon                                        ::  pedigree
          ank=ankh                                        ::  state
          let=@ud                                         ::  top id
          hit=(map ,@ud tako)                             ::  changes by id
          lab=(map ,@tas ,@ud)                            ::  labels
      ==                                                  ::

This is the data that is actually stored in a desk.

`ang` is unused and should be removed.

`ank` is the current state of the desk. Thus, it is the state of the
filesystem at revision `let`. The head of a desk is always a numbered
commit.
`let` is the number of the most recently numbered commit. This is also
the total number of numbered commits.

`hit` is a map of numerical ids to hashes of commits. These hashes are
mapped into their associated commits in `hut:rang`. In general, the keys
of this map are exactly the numbers from 1 to `let`, with no gaps. Of
course, when there are no numbered commits, `let` is 0, so `hit` is
null. Additionally, each of these commits is an ancestor of every commit
numbered greater than it, and thus a descendant of every commit numbered
less than it. Since the date in each commit (`t:yaki`) is no earlier
than that of each of its parents, the numbered commits are totally
ordered in the same way by both pedigree and date. Of course, not every
commit is numbered. If that sounds too complicated to you, don't worry
about it. It basically behaves exactly as you would expect.

`lab` is a map of textual labels to numbered commits. Note that labels
can only be applied to numbered commits. Labels must be unique across a
desk.

### `++ankh`, filesystem node

    ++  ankh                                              ::  fs node (new)
      $:  p=cash                                          ::  recursive hash
          q=(unit ,[p=cash q=*])                          ::  file
          r=(map ,@ta ankh)                               ::  folders
      ==                                                  ::

This is a single node in the filesystem. This may be a file or a
directory or both. In earth filesystems, a node is a file xor a
directory. On mars, we're inclusive, so a node is a file ior a
directory.

`p` is a recursive hash that depends on the contents of this file or
directory and on any children.

`q` is the contents of this file, if any. `p.q` is a hash of the
contents while `q.q` is the data itself.

`r` is the set of children of this node. In the case of a pure file,
this is empty. The keys are the names of the children and the values
are, recursively, the nodes themselves.

### `++cash`, ankh hash

    ++  cash  ,@uvH                                       ::  ankh hash

This is a 128-bit hash of an ankh. These are mostly stored within ankhs
themselves, and they are used to check for changes in possibly-deep
hierarchies.

### `++rung`, filesystem per neighbor ship

    ++  rung  $:  rus=(map desk rede)                     ::  neighbor desks
              ==                                          ::

This is the filesystem of a neighbor ship. The keys to this map are all
the desks we know about on their ship.

### `++rede`, desk state

    ++  rede                                              ::  universal project
      $:  lim=@da                                         ::  complete to
          qyx=cult                                        ::  subscribers
          ref=(unit rind)                                 ::  outgoing requests
          dom=dome                                        ::  revision state
      ==                                                  ::

This is our knowledge of the state of a desk, either foreign or
domestic.

`lim` is the date of the last full update. We only respond to requests
for stuff before this time.

`qyx` is the list of subscribers to this desk. For domestic desks, this
is simply `p:dojo`, all subscribers to the desk, while for foreign desks
this is all the subscribers from our ship to the foreign desk.

`ref` is the request manager for the desk. For domestic desks, this is
null since we handle requests ourselves.

`dom` is the actual data in the desk.

### `++rind`, request manager

    ++  rind                                              ::  request manager
      $:  nix=@ud                                         ::  request index
          bom=(map ,@ud ,[p=duct q=rave])                 ::  outstanding
          fod=(map duct ,@ud)                             ::  current requests
          haw=(map mood (unit))                           ::  simple cache
      ==                                                  ::

This is the request manager for a foreign desk.

`nix` is one more than the index of the most recent request. Thus, it is
the next available request number.

`bom` is the set of outstanding requests.
The keys of this map are some -subset of the numbers between 0 and one less than `nix`. The members of -the map are exactly those requests that have not yet been fully -satisfied. - -`fod` is the same set as `bom`, but from a different perspective. In -particular, the values of `fod` are the same as the values of `bom`, and -the `p` out of the values of `bom` are the same as the keys of `fod`. -Thus, we can map ducts to their associated request number and `++rave`, -and we can map numbers to their associated duct and `++rave`. - -`haw` is a map from simple requests to their values. This acts as a -cache for requests that have already been made. Thus, the second request -for a particular `++mood` is nearly instantaneous. - -### `++rang`, data store - - ++ rang $: hut=(map tako yaki) :: - lat=(map lobe blob) :: - == :: - -This is a set of data keyed by hash. Thus, this is where the "real" data -is stored, but it is only meaningful if we know the hash of what we're -looking for. - -`hut` is a map from hashes to commits. We often get the hashes from -`hit:dome`, which keys them by logical id. Not every commit has an id. - -`lat` is a map from hashes to the actual data. We often get the hashes -from a `++yaki`, a commit, which references this map to get the data. -There is no `++blob` in any `++yaki`. They are only accessible through -this map. - -### `++tako`, commit reference - - ++ tako ,@ :: yaki ref - -This is a hash of a `++yaki`, a commit. These are most notably used as -the keys in `hut:rang`, where they are associated with the actual -`++yaki`, and as the values in `hit:dome`, where sequential ids are -associated with these. - -### `++yaki`, commit - - ++ yaki ,[p=(list tako) q=(map path lobe) r=tako t=@da] :: commit - -This is a single commit. - -`p` is a list of the hashes of the parents of this commit. In most -cases, this will be a single commit, but in a merge there may be more -parents. In theory, there may be an arbitrary number of parents, but in -practice merges have exactly two parents. This may change in the future. -For commit 1, there is no parent. - -`q` is a map of the paths on a desk to the data at that location. If you -understand what a `++lobe` and a `++blob` is, then the type signature -here tells the whole story. - -`r` is the hash associated with this commit. - -`t` is the date at which this commit was made. - -### `++lobe`, data reference - - ++ lobe ,@ :: blob ref - -This is a hash of a `++blob`. These are most notably used in `lat:rang`, -where they are associated with the actual `++blob`, and as the values in -`q:yaki`, where paths are associated with their data in a commit. - -### `++blob`, data - - ++ blob $% [%delta p=lobe q=lobe r=udon] :: delta on q - [%direct p=lobe q=* r=umph] :: - [%indirect p=lobe q=* r=udon s=lobe] :: - == :: - -This is a node of data. In every case, `p` is the hash of the blob. - -`%delta` is the case where we define the data by a delta on other data. -In practice, the other data is always the previous commit, but nothing -depends on this. `q` is the hash of the parent blob, and `r` is the -delta. - -`%direct` is the case where we simply have the data directly. `q` is the -data itself, and `r` is any preprocessing instructions. These almost -always come from the creation of a file. - -`%indirect` is both of the preceding cases at once. `q` is the direct -data, `r` is the delta, and `s` is the parent blob. 
It should always be the case that applying `r` to `s` gives the same
-data as `q` directly (with the preprocessor instructions in `p.r`). This
-exists purely for performance reasons. This is unused at the moment, but
-in general these should be created when there is a long chain of changes
-so that we do not have to traverse the delta chain back to the creation
-of the file.
-
-### `++udon`, abstract delta
-
-    ++  udon                                  ::  abstract delta
-      $:  p=umph                              ::  preprocessor
-          $=  q                               ::  patch
-          $%  [%a p=* q=*]                    ::  trivial replace
-              [%b p=udal]                     ::  atomic indel
-              [%c p=(urge)]                   ::  list indel
-              [%d p=upas q=upas]              ::  tree edit
-          ==                                  ::
-      ==                                      ::
-
-This is an abstract change to a file. This is a superset of what would
-normally be called diffs. Diffs usually refer to changes in lines of
-text while we have the ability to do more interesting deltas on
-arbitrary data structures.
-
-`p` is any preprocessor instructions.
-
-`%a` refers to the trivial delta of a complete replace of old data with
-new data.
-
-`%b` refers to changes in an opaque atom on the block level. This has
-very limited usefulness, and is not used at the moment.
-
-`%c` refers to changes in a list of data. This is often lines of text,
-which is your classic diff. We, however, will work on any list of data.
-
-`%d` refers to changes in a tree of data. This is general enough to
-describe changes to any hoon noun, but often a more special-purpose
-delta should be created for different content types. This is not used
-at the moment, and may in fact be unimplemented.
-
-### `++urge`, list change
-
-    ++  urge  |*(a=_,* (list (unce a)))       ::  list change
-
-This is a parametrized type for list changes. For example, `(urge ,@t)`
-is a list change for lines of text.
-
-### `++unce`, change part of a list
-
-    ++  unce  |*  a=_,*                       ::  change part
-              $%  [%& p=@ud]                  ::  skip[copy]
-                  [%| p=(list a) q=(list a)]  ::  p -> q[chunk]
-              ==                              ::
-
-This is a single change in a list of elements of type `a`. For example,
-`(unce ,@t)` is a single change in a list of lines of text.
-
-`%&` means the next `p` lines are unchanged.
-
-`%|` means the lines `p` have changed to `q`.
-
-### `++umph`, preprocessing information
-
-    ++  umph                                  ::  change filter
-              $|  $?  %a                      ::  no filter
-                      %b                      ::  jamfile
-                      %c                      ::  LF text
-                  ==                          ::
-              $%  [%d p=@ud]                  ::  blocklist
-              ==                              ::
-
-This space intentionally left undocumented. This stuff will change once
-we get a well-typed clay.
-
-### `++upas`, tree change
-
-    ++  upas                                  ::  tree change (%d)
-              $&  [p=upas q=upas]             ::  cell
-              $%  [%0 p=axis]                 ::  copy old
-                  [%1 p=*]                    ::  insert new
-                  [%2 p=axis q=udon]          ::  mutate!
-              ==                              ::
-
-This space intentionally left undocumented. This stuff is not known to
-work, and will likely change when we get a well-typed clay. Also, this
-is not a complicated type; it is not difficult to work out the meaning.
-
-### `++nori`, repository action
-
-    ++  nori                                  ::  repository action
-              $%  [& q=soba]                  ::  delta
-                  [| p=@tas]                  ::  label
-              ==                              ::
-
-This describes a change that we are asking clay to make to the desk.
-There are two kinds of changes that may be made: we can modify files or
-we can apply a label to a commit.
-
-In the `|` case, we will simply label the current commit with the given
-label. In the `&` case, we will apply the given changes.
-
-### `++soba`, delta
-
-    ++  soba  ,[p=cart q=(list ,[p=path q=miso])]  ::  delta
-
-This describes a set of changes to make to a desk. The `cart` is simply
-a pair of the old hash and the new hash of the desk. The list is a list
-of changes keyed by the file they're changing.
Thus, the paths are paths -to files to be changed while `miso` is a description of the change -itself. - -### `++miso`, ankh delta - - ++ miso :: ankh delta - $% [%del p=*] :: delete - [%ins p=*] :: insert - [%mut p=udon] :: mutate - == :: - -There are three kinds of changes that may be made to a node in a desk. -We can insert a file, in which case `p` is the contents of the new file. -We can delete a file, in which case `p` is the contents of the old file. -Finally, we can mutate that file, in which case the `udon` describes the -changes we are applying to the file. - -### `++mizu`, merged state - - ++ mizu ,[p=@u q=(map ,@ud tako) r=rang] :: new state - -This is the input to the `%merg` kiss, which allows us to perform a -merge. The `p` is the number of the new head commit. The `q` is a map -from numbers to commit hashes. This is all the new numbered commits that -are to be inserted. The keys to this should always be the numbers from -`let.dom` plus one to `p`, inclusive. The `r` is the maps of all the new -commits and data. Since these are merged into the current state, no old -commits or data need be here. - -### `++riff`, request/desist - - ++ riff ,[p=desk q=(unit rave)] :: request/desist - -This represents a request for data about a particular desk. If `q` -contains a `rave`, then this opens a subscription to the desk for that -data. If `q` is null, then this tells clay to cancel the subscription -along this duct. - -### `++riot`, response - - ++ riot (unit rant) :: response/complete - -A riot is a response to a subscription. If null, the subscription has -been completed, and no more responses will be sent. Otherwise, the -`rant` is the produced data. - -### `++rant`, response data - - ++ rant :: namespace binding - $: p=[p=care q=case r=@tas] :: clade release book - q=path :: spur - r=* :: data - == :: - -This is the data at a particular node in the filesystem. `p.p` specifies -the type of data that was requested (and is produced). `q.p` gives the -specific version reported (since a range of versions may be requested in -a subscription). `r.p` is the desk. `q` is the path to the filesystem -node. `r` is the data itself (in the format specified by `p.p`). - -### `++nako`, subscription response data - - ++ nako $: gar=(map ,@ud tako) :: new ids - let=@ud :: next id - lar=(set yaki) :: new commits - bar=(set blob) :: new content - == :: - -This is the data that is produced by a request for a range of revisions -of a desk. This allows us to easily keep track of a remote repository -- -all the new information we need is contained in the `nako`. - -`gar` is a map of the revisions in the range to the hash of the commit -at that revision. These hashes can be used with `hut:rang` to find the -commit itself. - -`let` is either the last revision number in the range or the most recent -revision number, whichever is smaller. - -`lar` is the set of new commits, and `bar` is the set of new content. - -Public Interface ----------------- - -As with all vanes, there are exactly two ways to interact with clay. -`%clay` exports a namespace accessible through `.^`, which is described -above under `++care`. The primary way of interacting with clay, though, -is by sending kisses and receiving gifts. 
- - ++ gift :: out result <-$ - $% [%ergo p=@p q=@tas r=@ud] :: version update - [%note p=@tD q=tank] :: debug message - [%writ p=riot] :: response - == :: - ++ kiss :: in request ->$ - $% [%info p=@p q=@tas r=nori] :: internal edit - [%ingo p=@p q=@tas r=nori] :: internal noun edit - [%init p=@p] :: report install - [%into p=@p q=@tas r=nori] :: external edit - [%invo p=@p q=@tas r=nori] :: external noun edit - [%merg p=@p q=@tas r=mizu] :: internal change - [%wart p=sock q=@tas r=path s=*] :: network request - [%warp p=sock q=riff] :: file request - == :: - -There are only a small number of possible kisses, so it behooves us to -describe each in detail. - - $% [%info p=@p q=@tas r=nori] :: internal edit - - [%into p=@p q=@tas r=nori] :: external edit - -These two kisses are nearly identical. At a high level, they apply -changes to the filesystem. Whenever we add, remove, or edit a file, one -of these cards is sent. The `p` is the ship whose filesystem we're -trying to change, the `q` is the desk we're changing, and the `r` is the -request change. For the format of the requested change, see the -documentation for `++nori` above. - -When a file is changed in the unix filesystem, vere will send a `%into` -kiss. This tells clay that the duct over which the kiss was sent is the -duct that unix is listening on for changes. From within Arvo, though, we -should never send a `%into` kiss. The `%info` kiss is exactly identical -except it does not reset the duct. - - [%ingo p=@p q=@tas r=nori] :: internal noun edit - - [%invo p=@p q=@tas r=nori] :: external noun edit - -These kisses are currently identical to `%info` and `%into`, though this -will not always be the case. The intent is for these kisses to allow -typed changes to clay so that we may store typed data. This is currently -unimplemented. - - [%init p=@p] :: report install - -Init is called when a ship is started on our pier. This simply creates a -default `room` to go into our `raft`. Essentially, this initializes the -filesystem for a ship. - - [%merg p=@p q=@tas r=mizu] :: internal change - -This is called to perform a merge. This is most visibly called by -:update to update the filesystem of the current ship to that of its -sein. The `p` and `q` are as in `%info`, and the `r` is the description -of the merge. See `++mizu` above. - -XX -`XX [%wake ~] :: timer activate XX` -XX\ -XX This card is sent by unix at the time specified by `++doze`. This -time is XX usually the closest time specified in a subscription request. -When `%wake` is XX called, we update our subscribers if there have been -any changes. - - [%wart p=sock q=@tas r=path s=*] :: network request - -This is a request that has come across the network for a particular -file. When another ship asks for a file from us, that request comes to -us in the form of a `%wart` kiss. This is handled by trivially turning -it into a `%warp`. - - [%warp p=sock q=riff] :: file request - -This is a request for information about a particular desk. This is, in -its most general form, a subscription, though in many cases it is the -trivial case of a subscription -- a read. See `++riff` for the format of -the request. - -Lifecycle of a Local Read -------------------------- - -There are two real types of interaction with a filesystem: you can read, -and you can write. We'll describe each process, detailing both the flow -of control followed by the kernel and the algorithms involved. The -simpler case is that of the read, so we'll begin with that. 
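-
-Before tracing the code, here is a rough sketch of the outcomes described
-below. It is written in Python as a conceptual model only -- the toy
-`revisions` map, the `read` function, and its return values are
-illustrative, not part of clay -- but it mirrors the three ways a single
-local read can resolve (produce data now, wait for a future revision, or
-close as unsatisfiable):
-
-    # Conceptual model only, not clay's implementation.
-    # A desk maps numbered revisions (aeons) to {path: data}.
-    revisions = {1: {"/readme": "hello"}, 2: {"/readme": "hi", "/doc": "docs"}}
-
-    def read(aeon, path):
-        """Resolve a single read in the three ways described below."""
-        if aeon not in revisions:         # revision does not exist yet:
-            return ("wait",)              # queue the subscriber (cf. ++duce)
-        files = revisions[aeon]
-        if path not in files:             # revision exists, file does not:
-            return ("none",)              # close the subscription (cf. ++blub)
-        return ("data", files[path])      # send the result (cf. ++blab)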
- -When a vane or an application wishes to read a file from the filesystem, -it sends a `%warp` kiss, as described above. Of course, you may request -a file on another ship and, being a global filesystem, clay will happily -produce it for you. That code pathway will be described in another -section; here, we will restrict ourselves to examining the case of a -read from a ship on our own pier. - -The kiss can request either a single version of a file node or a range -of versions of a desk. Here, we'll deal only with a request for a single -version. - -As in all vanes, a kiss enters clay via a call to `++call`. Scanning -through the arm, we quickly see where `%warp` is handled. - - ?: =(p.p.q.hic q.p.q.hic) - =+ une=(un p.p.q.hic now ruf) - =+ wex=(di:une p.q.q.hic) - =+ ^= wao - ?~ q.q.q.hic - (ease:wex hen) - (eave:wex hen u.q.q.q.hic) - =+ ^= woo - abet:wao - [-.woo abet:(pish:une p.q.q.hic +.woo ran.wao)] - -We're following the familar patern of producing a list of moves and an -updated state. In this case, the state is `++raft`. - -We first check to see if the sending and receiving ships are the same. -If they're not, then this is a request for data on another ship. We -describe that process later. Here, we discuss only the case of a local -read. - -At a high level, the call to `++un` sets up the core for the domestic -ship that contains the files we're looking for. The call to `++di` sets -up the core for the particular desk we're referring to. - -After this, we perform the actual request. If there is no rave in the -riff, then that means we are cancelling a request, so we call -`++ease:de`. Otherwise, we start a subscription with `++eave:de`. We -call `++abet:de` to resolve our various types of output into actual -moves. We produce the moves we found above and the `++un` core resolved -with `++pish:un` (putting the modified desk in the room) and `++abet:un` -(putting the modified room in the raft). - -Much of this is fairly straightforward, so we'll only describe `++ease`, -`++eave`, and `++abet:de`. Feel free to look up the code to the other -steps -- it should be easy to follow. - -Although it's called last, it's usually worth examining `++abet` first, -since it defines in what ways we can cause side effects. Let's do that, -and also a few of the lines at the beginning of `++de`. - - =| yel=(list ,[p=duct q=gift]) - =| byn=(list ,[p=duct q=riot]) - =| vag=(list ,[p=duct q=gift]) - =| say=(list ,[p=duct q=path r=ship s=[p=@ud q=riff]]) - =| tag=(list ,[p=duct q=path c=note]) - |% - ++ abet - ^- [(list move) rede] - :_ red - ;: weld - %+ turn (flop yel) - |=([a=duct b=gift] [hun %give b]) - :: - %+ turn (flop byn) - |=([a=duct b=riot] [a %give [%writ b]]) - :: - %+ turn (flop vag) - |=([a=duct b=gift] [a %give b]) - :: - %+ turn (flop say) - |= [a=duct b=path c=ship d=[p=@ud q=riff]] - :- a - [%pass b %a %want [who c] [%q %re p.q.d (scot %ud p.d) ~] q.d] - :: - %+ turn (flop tag) - |=([a=duct b=path c=note] [a %pass b c]) - == - -This is very simple code. We see there are exactly five different kinds -of side effects we can generate. - -In `yel` we put gifts that we wish to be sent along the `hun:room` duct -to dill. See the documentation for `++room` above. This is how we -display messages to the terminal. - -In `byn` we put riots that we wish returned to subscribers. Recall that -a riot is a response to a subscription. These are returned to our -subscribers in the form of a `%writ` gift. - -In `vag` we put gifts along with the ducts on which to send them. 
This -allows us to produce arbitrary gifts, but in practice this is only used -to produce `%ergo` gifts. - -In `say` we put messages we wish to pass to ames. These messages are -used to request information from clay on other piers. We must provide -not only the duct and the request (the riff), but also the return path, -the other ship to talk to, and the sequence number of the request. - -In `tag` we put arbitrary notes we wish to pass to other vanes. For now, -the only notes we pass here are `%wait` and `%rest` to the timer vane. - -Now that we know what kinds of side effects we may have, we can jump -into the handling of requests. - - ++ ease :: release request - |= hen=duct - ^+ +> - ?~ ref +> - =+ rov=(~(got by qyx) hen) - =. qyx (~(del by qyx) hen) - (mabe rov (cury best hen)) - =. qyx (~(del by qyx) hen) - |- ^+ +>+.$ - =+ nux=(~(get by fod.u.ref) hen) - ?~ nux +>+.$ - %= +>+.$ - say [[hen [(scot %ud u.nux) ~] for [u.nux syd ~]] say] - fod.u.ref (~(del by fod.u.ref) hen) - bom.u.ref (~(del by bom.u.ref) u.nux) - == - -This is called when we're cancelling a subscription. For domestic desks, -`ref` is null, so we're going to cancel any timer we might have created. -We first delete the duct from our map of requests, and then we call -`++mabe` with `++best` to send a `%rest` kiss to the timer vane if we -have started a timer. We'll describe `++best` and `++mabe` momentarily. - -Although we said we're not going to talk about foreign requests yet, -it's easy to see that for foreign desks, we cancel any outstanding -requests for this duct and send a message over ames to the other ship -telling them to cancel the subscription. - - ++ best - |= [hen=duct tym=@da] - %_(+> tag :_(tag [hen /tyme %t %rest tym])) - -This simply pushes a `%rest` note onto `tag`, from where it will be -passed back to arvo to be handled. This cancels the timer at the given -duct (with the given time). - - ++ mabe :: maybe fire function - |* [rov=rove fun=$+(@da _+>.^$)] - ^+ +>.$ - %- fall :_ +>.$ - %- bind :_ fun - ^- (unit ,@da) - ?- -.rov - %& - ?. ?=(%da -.q.p.rov) ~ - `p.q.p.rov - %| - =* mot p.rov - %+ hunt - ?. ?=(%da -.p.mot) ~ - ?.((lth now p.p.mot) ~ [~ p.p.mot]) - ?. ?=(%da -.q.mot) ~ - ?.((lth now p.q.mot) [~ now] [~ p.q.mot]) - == - -This decides whether the given request can only be satsified in the -future. In that case, we call the given function with the time in the -future when we expect to have an update to give to this request. This is -called with `++best` to cancel timers and with `++bait` to start them. - -For single requests, we have a time if the request is for a particular -time (which is assumed to be in the future). For ranges of requests, we -check both the start and end cases to see if they are time cases. If so, -we choose the earlier time. - -If any of those give us a time, then we call the given funciton with the -smallest time. - -The more interesting case is, of course, when we're not cancelling a -subscription but starting one. - - ++ eave :: subscribe - |= [hen=duct rav=rave] - ^+ +> - ?- -.rav - & - ?: &(=(p.p.rav %u) !=(p.q.p.rav now)) - ~& [%clay-fail p.q.p.rav %now now] - !! - =+ ver=(aver p.rav) - ?~ ver - (duce hen rav) - ?~ u.ver - (blub hen) - (blab hen p.rav u.u.ver) - -There are two types of subscriptions -- either we're requesting a single -file or we're requesting a range of versions of a desk. We'll dicuss the -simpler case first. - -First, we check that we're not requesting the `rang` from any time other -than the present. 
Since we don't store that information for any other -time, we can't produce it in a referentially transparent manner for any -time other than the present. - -Then, we try to read the requested `mood` `p.rav`. If we can't access -the request data right now, we call `++duce` to put the request in our -queue to be satisfied when the information becomes available. - -This case occurs when we make a request for a case whose (1) date is -after the current date, (2) number is after the current number, or (3) -label is not yet used. - - ++ duce :: produce request - |= [hen=duct rov=rove] - ^+ +> - =. qyx (~(put by qyx) hen rov) - ?~ ref - (mabe rov (cury bait hen)) - |- ^+ +>+.$ :: XX why? - =+ rav=(reve rov) - =+ ^= vaw ^- rave - ?. ?=([%& %v *] rav) rav - [%| [%ud let.dom] `case`q.p.rav r.p.rav] - =+ inx=nix.u.ref - %= +>+.$ - say [[hen [(scot %ud inx) ~] for [inx syd ~ vaw]] say] - nix.u.ref +(nix.u.ref) - bom.u.ref (~(put by bom.u.ref) inx [hen vaw]) - fod.u.ref (~(put by fod.u.ref) hen inx) - == - -The code for `++duce` is nearly the exact inverse of `++ease`, which in -the case of a domestic desk is very simple -- we simply put the duct and -rave into `qyx` and possibly start a timer with `++mabe` and `++bait`. -Recall that `ref` is null for domestic desks and that `++mabe` fires the -given function with the time we need to be woken up at, if we need to be -woken up at a particular time. - - ++ bait - |= [hen=duct tym=@da] - %_(+> tag :_(tag [hen /tyme %t %wait tym])) - -This sets an alarm by sending a `%wait` card with the given time to the -timer vane. - -Back in `++eave`, if `++aver` returned `[~ ~]`, then we cancel the -subscription. This occurs when we make (1) a `%x` request for a file -that does not exist, (2) a `%w` request with a case that is not a -number, or (3) a `%w` request with a nonempty path. The `++blub` is -exactly what you would expect it to be. - - ++ blub :: ship stop - |= hen=duct - %_(+> byn [[hen ~] byn]) - -We notify the duct that we're cancelling their subscription since it -isn't satisfiable. - -Otherwise, we have received the desired information, so we send it on to -the subscriber with `++blab`. - - ++ blab :: ship result - |= [hen=duct mun=mood dat=*] - ^+ +> - +>(byn [[hen ~ [p.mun q.mun syd] r.mun dat] byn]) - -The most interesting arm called in `++eave` is, of course, `++aver`, -where we actually try to read the data. - - ++ aver :: read - |= mun=mood - ^- (unit (unit ,*)) - ?: &(=(p.mun %u) !=(p.q.mun now)) :: prevent bad things - ~& [%clay-fail p.q.mun %now now] - !! - =+ ezy=?~(ref ~ (~(get by haw.u.ref) mun)) - ?^ ezy ezy - =+ nao=(~(case-to-aeon ze lim dom ran) q.mun) - ?~(nao ~ [~ (~(read-at-aeon ze lim dom ran) u.nao mun)]) - -We check immediately that we're not requesting the `rang` for any time -other than the present. - -If this is a foreign desk, then we check our cache for the specific -request. If either this is a domestic desk or we don't have the request -in our cache, then we have to actually go read the data from our dome. - -We need to do two things. First, we try to find the number of the commit -specified by the given case, and then we try to get the data there. - -Here, we jump into `arvo/zuse.hoon`, which is where much of the -algorithmic code is stored, as opposed to the clay interface, which is -stored in `arvo/clay.hoon`. We examine `++case-to-aeon:ze`. 
- - ++ case-to-aeon :: case-to-aeon:ze - |= lok=case :: act count through - ^- (unit aeon) - ?- -.lok - %da - ?: (gth p.lok lim) ~ - |- ^- (unit aeon) - ?: =(0 let) [~ 0] :: avoid underflow - ?: %+ gte p.lok - =< t - %- tako-to-yaki - %- aeon-to-tako - let - [~ let] - $(let (dec let)) - :: - %tas (~(get by lab) p.lok) - %ud ?:((gth p.lok let) ~ [~ p.lok]) - == - -We handle each type of `case` differently. The latter two types are -easy. - -If we're requesting a revision by label, then we simply look up the -requested label in `lab` from the given dome. If it exists, that is our -aeon; else we produce null, indicating the requested revision does not -yet exist. - -If we're requesting a revision by number, we check if we've yet reached -that number. If so, we produce the number; else we produce null. - -If we're requesting a revision by date, we check first if the date is in -the future, returning null if so. Else we start from the most recent -revision and scan backwards until we find the first revision committed -before that date, and we produce that. If we requested a date before any -revisions were committed, we produce `0`. - -The definitions of `++aeon-to-tako` and `++tako-to-yaki` are trivial. - - ++ aeon-to-tako ~(got by hit) - - ++ tako-to-yaki ~(got by hut) :: grab yaki - -We simply look up the aeon or tako in their respective maps (`hit` and -`hut`). - -Assuming we got a valid version number, `++aver` calls -`++read-at-aeon:ze`, which reads the requested data at the given -revision. - - ++ read-at-aeon :: read-at-aeon:ze - |= [oan=aeon mun=mood] :: seek and read - ^- (unit) - ?: &(?=(%w p.mun) !?=(%ud -.q.mun)) :: NB only for speed - ?^(r.mun ~ [~ oan]) - (read:(rewind oan) mun) - -If we're requesting the revision number with a case other than by -number, then we go ahead and just produce the number we were given. -Otherwise, we call `++rewind` to rewind our state to the given revision, -and then we call `++read` to get the requested information. - - ++ rewind :: rewind:ze - |= oan=aeon :: rewind to aeon - ^+ +> - ?: =(let oan) +> - ?: (gth oan let) !! :: don't have version - +>(ank (checkout-ankh q:(tako-to-yaki (aeon-to-tako oan))), let oan) - -If we're already at the requested version, we do nothing. If we're -requesting a version later than our head, we are unable to comply. - -Otherwise, we get the hash of the commit at the number, and from that we -get the commit itself (the yaki), which has the map of path to lobe that -represents a version of the filesystem. We call `++checkout-ankh` to -checkout the commit, and we replace `ank` in our context with the -result. - - ++ checkout-ankh :: checkout-ankh:ze - |= hat=(map path lobe) :: checkout commit - ^- ankh - %- cosh - %+ roll (~(tap by hat) ~) - |= [[pat=path bar=lobe] ank=ankh] - ^- ankh - %- cosh - ?~ pat - =+ zar=(lobe-to-noun bar) - ank(q [~ (sham zar) zar]) - =+ nak=(~(get by r.ank) i.pat) - %= ank - r %+ ~(put by r.ank) i.pat - $(pat t.pat, ank (fall nak _ankh)) - == - -Twice we call `++cosh`, which hashes a commit, updating `p` in an -`ankh`. Let's jump into that algorithm before we describe -`++checkout-ankh`. - - ++ cosh :: locally rehash - |= ank=ankh :: NB v/unix.c - ank(p rehash:(zu ank)) - -We simply replace `p` in the hash with the `cash` we get from a call to -`++rehash:zu`. 
- - ++ zu !: :: filesystem - |= ank=ankh :: filesystem state - =| myz=(list ,[p=path q=miso]) :: changes in reverse - =| ram=path :: reverse path into - |% - ++ rehash :: local rehash - ^- cash - %+ mix ?~(q.ank 0 p.u.q.ank) - =+ axe=1 - |- ^- cash - ?~ r.ank _@ - ;: mix - (shaf %dash (mix axe (shaf %dush (mix p.n.r.ank p.q.n.r.ank)))) - $(r.ank l.r.ank, axe (peg axe 2)) - $(r.ank r.r.ank, axe (peg axe 3)) - == - -`++zu` is a core we set up with a particular filesystem node to traverse -a checkout of the filesystem and access the actual data inside it. One -of the things we can do with it is to create a recursive hash of the -node. - -In `++rehash`, if this node is a file, then we xor the remainder of the -hash with the hash of the contents of the file. The remainder of the -hash is `0` if we have no children, else we descend into our children. -Basically, we do a half SHA-256 of the xor of the axis of this child and -the half SHA-256 of the xor of the name of the child and the hash of the -child. This is done for each child and all the results are xored -together. - -Now we return to our discussion of `++checkout-ankh`. - -We fold over every path in this version of the filesystem and create a -great ankh out of them. First, we call `++lobe-to-noun` to get the raw -data referred to be each lobe. - - ++ lobe-to-noun :: grab blob - |= p=lobe :: ^- * - %- blob-to-noun - (lobe-to-blob p) - -This converts a lobe into the raw data it refers to by first getting the -blob with `++lobe-to-blob` and converting that into data with -`++blob-to-noun`. - - ++ lobe-to-blob ~(got by lat) :: grab blob - -This just grabs the blob that the lobe refers to. - - ++ blob-to-noun :: grab blob - |= p=blob - ?- -.p - %delta (lump r.p (lobe-to-noun q.p)) - %direct q.p - %indirect q.p - == - -If we have either a direct or an indirect blob, then the data is stored -right in the blob. Otherwise, we have to reconstruct it from the diffs. -We do this by calling `++lump` on the diff in the blob with the data -obtained by recursively calling the parent of this blob. - - ++ lump :: apply patch - |= [don=udon src=*] - ^- * - ?+ p.don ~|(%unsupported !!) - %a - ?+ -.q.don ~|(%unsupported !!) - %a q.q.don - %c (lurk ((hard (list)) src) p.q.don) - %d (lure src p.q.don) - == - :: - %c - =+ dst=(lore ((hard ,@) src)) - %- roly - ?+ -.q.don ~|(%unsupported !!) - %a ((hard (list ,@t)) q.q.don) - %c (lurk dst p.q.don) - == - == - -This is defined in `arvo/hoon.hoon` for historical reasons which are -likely no longer applicable. Since the `++umph` structure will likely -change we convert clay to be a typed filesystem, we'll only give a -high-level description of this process. If we have a `%a` udon, then -we're performing a trivial replace, so we produce simply `q.q.don`. If -we have a `%c` udon, then we're performing a list merge (as in, for -example, lines of text). The merge is performed by `++lurk`. - - ++ lurk :: apply list patch - |* [hel=(list) rug=(urge)] - ^+ hel - =+ war=`_hel`~ - |- ^+ hel - ?~ rug (flop war) - ?- -.i.rug - & - %= $ - rug t.rug - hel (slag p.i.rug hel) - war (weld (flop (scag p.i.rug hel)) war) - == - :: - | - %= $ - rug t.rug - hel =+ gur=(flop p.i.rug) - |- ^+ hel - ?~ gur hel - ?>(&(?=(^ hel) =(i.gur i.hel)) $(hel t.hel, gur t.gur)) - war (weld q.i.rug war) - == - == - -We accumulate our final result in `war`. If there's nothing more in our -list of merge instructions (unces), we just reverse `war` and produce -it. Otherwise, we process another unce. 
If the unce is of type `&`, then
-we have `p.i.rug` lines of no changes, so we just copy them over from
-`hel` to `war`. If the unce is of type `|`, then we verify that the
-source lines (in `hel`) are what we expect them to be (`p.i.rug`),
-crashing on failure. If they're good, then we append the new lines in
-`q.i.rug` onto `war`.
-
-And that's really it. List merges are pretty easy. Anyway, if you
-recall, we were discussing `++checkout-ankh`.
-
-    ++  checkout-ankh                         ::  checkout-ankh:ze
-      |=  hat=(map path lobe)                 ::  checkout commit
-      ^-  ankh
-      %-  cosh
-      %+  roll  (~(tap by hat) ~)
-      |=  [[pat=path bar=lobe] ank=ankh]
-      ^-  ankh
-      %-  cosh
-      ?~  pat
-        =+  zar=(lobe-to-noun bar)
-        ank(q [~ (sham zar) zar])
-      =+  nak=(~(get by r.ank) i.pat)
-      %=  ank
-        r  %+  ~(put by r.ank)  i.pat
-           $(pat t.pat, ank (fall nak _ankh))
-      ==
-
-If the path is null, then we calculate `zar`, the raw data at the path
-`pat` in this version. We produce the given ankh with the correct data.
-
-Otherwise, we try to get the child we're looking at from our parent
-ankh. If it's already been created, this succeeds; otherwise, we simply
-create a default blank ankh. We place ourselves in our parent after
-recursively computing our children.
-
-This algorithm really isn't that complicated, but it may not be
-immediately obvious why it works. An example should clear everything up.
-
-Suppose `hat` is a map of the following information.
-
-    /greeting                -->  "customary upon meeting"
-    /greeting/english        -->  "hello"
-    /greeting/spanish        -->  "hola"
-    /greeting/russian/short  -->  "привет"
-    /greeting/russian/long   -->  "Здравствуйте"
-    /farewell/russian        -->  "до свидания"
-
-Furthermore, let's say that we process them in this order:
-
-    /greeting/english
-    /greeting/russian/short
-    /greeting/russian/long
-    /greeting
-    /greeting/spanish
-    /farewell/russian
-
-Then, the first path we process is `/greeting/english`. Since our path
-is not null, we try to get `nak`, but because our ankh is blank at this
-point it doesn't find anything. Thus, we update our blank top-level ankh
-with a child `%greeting`, and recurse with the blank `nak` to create the
-ankh of the new child.
-
-In the recursion, our path is `/english` and our ankh is again blank.
-We try to get the `english` child of our ankh, but this of course fails.
-Thus, we update our blank `/greeting` ankh with a child `english`
-produced by recursing.
-
-Now our path is null, so we call `++lobe-to-noun` to get the actual
-data, and we place it in the brand-new ankh.
-
-Next, we process `/greeting/russian/short`. Since our path is not null,
-we try to get the child named `%greeting`, which does exist since we
-created it earlier. We modify this child by recursing on it. Our
-path is now `/russian/short`, so we look for a `%russian` child in our
-`/greeting` ankh. This doesn't exist, so we add it by recursing. Our
-path is now `/short`, so we look for a `%short` child in our
-`/greeting/russian` ankh. This doesn't exist, so we add it by recursing.
-Now our path is null, so we set the contents of this node to `"привет"`,
-and we're done processing this path.
-
-Next, we process `/greeting/russian/long`. This proceeds similarly to
-the previous one except that now the ankh for `/greeting/russian`
-already exists, so we simply reuse it rather than creating a new one. Of
-course, we still must create a new `/greeting/russian/long` ankh.
-
-Next, we process `/greeting`.
This ankh already exists, so after we've -recursed once, our path is null, and our ankh is not blank -- it already -has two children (and two grandchildren). We don't touch those, though, -since a node may be both a file and a directory. We just add the -contents of the file -- "customary upon meeting" -- to the existing -ankh. - -Next, we process `/greeting/spanish`. Of course, the `/greeting` ankh -already exists, but it doesn't have a `%spanish` child, so we create -that, taking care not to disturb the contents of the `/greeting` file. -We put "hola" into the ankh and call it good. - -Finally, we process `/farewell/russian`. Here, the `/farewell` ankh -doesn't exist, so we create it. Clearly the newly-created ankh doesn't -have any children, so we have to add a `%russian` child, and in this -child we put our last content -- "до свидания". - -We hope it's fairly obvious that the order we process the paths doesn't -affect the final ankh tree. The tree will be constructed in a very -different order depending on what order the paths come in, but the -resulting tree is independent of order. - -At any rate, we were talking about something important, weren't we? If -you recall, that concludes our discussion of `++rewind`, which was -called from `++read-at-aeon`. In summary, `++rewind` returns a context -in which our current state is (very nearly) as it was when the specified -version of the desk was the head. This allows `++read-at-aeon` to call -`++read` to read the requested information. - - ++ read :: read:ze - |= mun=mood :: read at point - ^- (unit) - ?: ?=(%v p.mun) - [~ `dome`+<+<.read] - ?: &(?=(%w p.mun) !?=(%ud -.q.mun)) - ?^(r.mun ~ [~ let]) - ?: ?=(%w p.mun) - =+ ^= yak - %- tako-to-yaki - %- aeon-to-tako - let - ?^(r.mun ~ [~ [t.yak (forge-nori yak)]]) - ::?> ?=(^ hit) ?^(r.mun ~ [~ i.hit]) :: what do?? need [@da nori] - (query(ank ank:(descend-path:(zu ank) r.mun)) p.mun) - -If we're requesting the dome, then we just return that immediately. - -If we're requesting the revision number of the desk and we're not -requesting it by number, then we just return the current number of this -desk. Note of course that this was really already handled in -`++read-at-aeon`. - -If we're requesting a `%w` with a specific revision number, then we do -something or other with the commit there. It's kind of weird, and it -doesn't seem to work, so we'll ignore this case. - -Otherwise, we descend into the ankh tree to the given path with -`++descend-path:zu`, and then we handle specific request in `++query`. - - ++ descend-path :: descend recursively - |= way=path - ^+ +> - ?~(way +> $(way t.way, +> (descend i.way))) - -This is simple recursion down into the ankh tree. `++descend` descends -one level, so this will eventually get us down to the path we want. - - ++ descend :: descend - |= lol=@ta - ^+ +> - =+ you=(~(get by r.ank) lol) - +>.$(ram [lol ram], ank ?~(you [*cash ~ ~] u.you)) - -`ram` is the path that we're at, so to descend one level we push the -name of this level onto that path. We update the ankh with the correct -one at that path if it exists; else we create a blank one. - -Once we've decscended to the correct level, we need to actually deal -with the request. - - ++ query :: query:ze - |= ren=?(%u %v %x %y %z) :: endpoint query - ^- (unit ,*) - ?- ren - %u [~ `rang`+<+>.query] - %v [~ `dome`+<+<.query] - %x ?~(q.ank ~ [~ q.u.q.ank]) - %y [~ as-arch] - %z [~ ank] - == - -Now that everything's set up, it's really easy. If they're requesting -the rang, dome, or ankh, we give it to them. 
If the contents of a file, -we give it to them if it is in fact a file. If the `arch`, then we -calculate it with `++as-arch`. - - ++ as-arch :: as-arch:ze - ^- arch :: arch report - :+ p.ank - ?~(q.ank ~ [~ p.u.q.ank]) - |- ^- (map ,@ta ,~) - ?~ r.ank ~ - [[p.n.r.ank ~] $(r.ank l.r.ank) $(r.ank r.r.ank)] - -This very simply strips out all the "real" data and returns just our own -hash, the hash of the file contents (if we're a file), and a map of the -names of our immediate children. - -Lifecycle of a Local Subscription ---------------------------------- - -A subscription to a range of revisions of a desk initially follows the -same path that a single read does. In `++aver`, we checked the head of -the given rave. If the head was `&`, then it was a single request, so we -handled it above. If `|`, then we handle it with the following code. - - =+ nab=(~(case-to-aeon ze lim dom ran) p.p.rav) - ?~ nab - ?> =(~ (~(case-to-aeon ze lim dom ran) q.p.rav)) - (duce hen (rive rav)) - =+ huy=(~(case-to-aeon ze lim dom ran) q.p.rav) - ?: &(?=(^ huy) |((lth u.huy u.nab) &(=(0 u.huy) =(0 u.nab)))) - (blub hen) - =+ top=?~(huy let.dom u.huy) - =+ sar=(~(lobes-at-path ze lim dom ran) u.nab r.p.rav) - =+ ear=(~(lobes-at-path ze lim dom ran) top r.p.rav) - =. +>.$ - ?: =(sar ear) +>.$ - =+ fud=(~(make-nako ze lim dom ran) u.nab top) - (bleb hen u.nab fud) - ?^ huy - (blub hen) - =+ ^= ptr ^- case - [%ud +(let.dom)] - (duce hen `rove`[%| ptr q.p.rav r.p.rav ear]) - == - -Recall that `++case-to-aeon:ze` produces the revision number that a case -corresponds to, if it corresponds to any. If it doesn't yet correspond -to a revision, then it produces null. - -Thus, we first check to see if we've even gotten to the beginning of the -range of revisions requested. If not, then we assert that we haven't yet -gotten to the end of the range either, because that would be really -strange. If not, then we immediately call `++duce`, which, if you -recall, for a local request, simply puts this duct and rove into our -cult `qyx`, so that we know who to respond to when the revision does -appear. - -If we've already gotten to the first revision, then we can produce some -content immediately. If we've also gotten to the final revision, and -that revision is earlier than the start revision, then it's a bad -request and we call `++blub`, which tells the subscriber that his -subscription will not be satisfied. - -Otherwise, we find the data at the given path at the beginning of the -subscription and at the last available revision in the subscription. If -they're the same, then we don't send a notification. Otherwise, we call -`++gack`, which creates the `++nako` we need to produce. We call -`++bleb` to actually produce the information. - -If we already have the last requested revision, then we also tell the -subscriber with `++blub` that the subscription will receive no further -updates. - -If there will be more revisions in the subscription, then we call -`++duce`, adding the duct to our subscribers. We modify the rove to -start at the next revision since we've already handled all the revisions -up to the present. - -We glossed over the calls to `++lobes-at-path`, `++make-nako`, and -`++bleb`, so we'll get back to those right now. `++bleb` is simple, so -we'll start with that. - - ++ bleb :: ship sequence - |= [hen=duct ins=@ud hip=nako] - ^+ +> - (blab hen [%w [%ud ins] ~] hip) - -We're given a duct, the beginning revision number, and the nako that -contains the updates since that revision. 
We use `++blab` to produce -this result to the subscriber. The case is `%w` with a revision number -of the beginning of the subscription, and the data is the nako itself. - -We call `++lobes-at-path:ze` to get the data at the particular path. - - ++ lobes-at-path :: lobes-at-path:ze - |= [oan=aeon pax=path] :: data at path - ^- (map path lobe) - ?: =(0 oan) ~ - %- mo - %+ skim - %. ~ - %~ tap by - =< q - %- tako-to-yaki - %- aeon-to-tako - oan - |= [p=path q=lobe] - ?| ?=(~ pax) - ?& !?=(~ p) - =(-.pax -.p) - $(p +.p, pax +.pax) - == == - -At revision zero, the theoretical common revision between all -repositories, there is no data, so we produce null. - -We get the list of paths (paired with their lobe) in the revision -referred to by the given number and we keep only those paths which begin -with `pax`. Converting to a map, we now have a map from the subpaths at -the given path to the hash of their data. This is simple and efficient -to calculate and compare to later revisions. This allows us to easily -tell if a node or its children have changed. - -Finally, we will describe `++make-nako:ze`. - - ++ make-nako :: gack a through b - |= [a=aeon b=aeon] - ^- [(map aeon tako) aeon (set yaki) (set blob)] - :_ :- b - =- [(takos-to-yakis -<) (lobes-to-blobs ->)] - %+ reachable-between-takos - (~(get by hit) a) :: if a not found, a=0 - (aeon-to-tako b) - ^- (map aeon tako) - %- mo %+ skim (~(tap by hit) ~) - |= [p=aeon *] - &((gth p a) (lte p b)) - -We need to produce four things -- the numbers of the new commits, the -number of the latest commit, the new commits themselves, and the new -data itself. - -The first is fairly easy to produce. We simply go over our map of -numbered commits and produce all those numbered greater than `a` and not -greater than `b`. - -The second is even easier to produce -- `b` is clearly our most recent -commit. - -The third and fourth are slightly more interesting, though not too -terribly difficult. First, we call `++reachable-between-takos`. - - ++ reachable-between-takos - |= [a=(unit tako) b=tako] :: pack a through b - ^- [(set tako) (set lobe)] - =+ ^= sar - ?~ a ~ - (reachable-takos r:(tako-to-yaki u.a)) - =+ yak=`yaki`(tako-to-yaki b) - %+ new-lobes-takos (new-lobes ~ sar) :: get lobes - |- ^- (set tako) :: walk onto sar - ?: (~(has in sar) r.yak) - ~ - =+ ber=`(set tako)`(~(put in `(set tako)`~) `tako`r.yak) - %- ~(uni in ber) - ^- (set tako) - %+ roll p.yak - |= [yek=tako bar=(set tako)] - ^- (set tako) - ?: (~(has in bar) yek) :: save some time - bar - %- ~(uni in bar) - ^$(yak (tako-to-yaki yek)) - -We take a possible starting commit and a definite ending commit, and we -produce the set of commits and the set of data between them. - -We let `sar` be the set of commits reachable from `a`. If `a` is null, -then obviously no commits are reachable. Otherwise, we call -`++reachable-takos` to calculate this. - - ++ reachable-takos :: reachable - |= p=tako :: XX slow - ^- (set tako) - =+ y=(tako-to-yaki p) - =+ t=(~(put in _(set tako)) p) - %+ roll p.y - |= [q=tako s=_t] - ?: (~(has in s) q) :: already done - s :: hence skip - (~(uni in s) ^$(p q)) :: otherwise traverse - -We very simply produce the set of the given tako plus its parents, -recursively. - -Back in `++reachable-between-takos`, we let `yak` be the yaki of `b`, -the ending commit. With this, we create a set that is the union of `sar` -and all takos reachable from `b`. - -We pass `sar` into `++new-lobes` to get all the lobes referenced by any -tako referenced by `a`. 
The result is passed into `++new-lobes-takos` to
-do the same, but without recomputing those already calculated in the
-previous step. This produces the sets of takos and lobes we need.
-
-    ++  new-lobes                             ::  object hash set
-      |=  [b=(set lobe) a=(set tako)]         ::  that aren't in b
-      ^-  (set lobe)
-      %+  roll  (~(tap in a) ~)
-      |=  [tak=tako bar=(set lobe)]
-      ^-  (set lobe)
-      =+  yak=(tako-to-yaki tak)
-      %+  roll  (~(tap by q.yak) ~)
-      |=  [[path lob=lobe] far=_bar]
-      ^-  (set lobe)
-      ?~  (~(has in b) lob)                   ::  don't need
-        far
-      =+  gar=(lobe-to-blob lob)
-      ?-  -.gar
-        %direct    (~(put in far) lob)
-        %delta     (~(put in $(lob q.gar)) lob)
-        %indirect  (~(put in $(lob s.gar)) lob)
-      ==
-
-Here, we're creating a set of lobes referenced in a commit in `a`. We
-start out with `b` as the initial set of lobes, so we don't need to
-recompute any of the lobes referenced in there.
-
-The algorithm is pretty simple, so we won't bore you with the details.
-We simply traverse every commit in `a`, looking at every blob referenced
-there, and, if it's not already in `b`, we add it to `b`. In the case of
-a direct blob, we're done. For a delta or an indirect blob, we
-recursively add every blob referenced within the blob.
-
-    ++  new-lobes-takos                       ::  garg & repack
-      |=  [b=(set lobe) a=(set tako)]
-      ^-  [(set tako) (set lobe)]
-      [a (new-lobes b a)]
-
-Here, we just update the set of lobes we're given with the commits we're
-given and produce both sets.
-
-This concludes our discussion of a local subscription.
-
-Lifecycle of a Foreign Read or Subscription
--------------------------------------------
-
-Foreign reads and subscriptions are handled in much the same way as
-local ones. The interface is the same -- a vane or app sends a `%warp`
-kiss with the request. The difference is simply that the `sock` refers
-to the foreign ship.
-
-Thus, we start in the same place -- in `++call`, handling `%warp`.
-However, since the two sides of the `sock` are different, we follow a
-different path.
-
-    =+  wex=(do now p.q.hic p.q.q.hic ruf)
-    =+  ^=  woo
-      ?~  q.q.q.hic
-        abet:(ease:wex hen)
-      abet:(eave:wex hen u.q.q.q.hic)
-    [-.woo (posh q.p.q.hic p.q.q.hic +.woo ruf)]
-
-If we compare this to how the local case was handled, we see that it's
-not all that different. We use `++do` rather than `++un` and `++de` to
-set up the core for the foreign ship. This gives us a `++de` core, so we
-either cancel or begin the request by calling `++ease` or `++eave`,
-exactly as in the local case. In either case, we call `++abet:de` to
-resolve our various types of output into actual moves, as described in
-the local case. Finally, we call `++posh` to update our raft, putting
-the modified rung into the raft.
-
-We'll first trace through `++do`.
-
-    ++  do
-      |=  [now=@da [who=ship him=ship] syd=@tas ruf=raft]
-      =+  ^=  rug  ^-  rung
-          =+  rug=(~(get by hoy.ruf) him)
-          ?^(rug u.rug *rung)
-      =+  ^=  red  ^-  rede
-          =+  yit=(~(get by rus.rug) syd)
-          ?^(yit u.yit `rede`[~2000.1.1 ~ [~ *rind] *dome])
-      ((de now ~ ~) [who him] syd red ran.ruf)
-
-If we already have a rung for this foreign ship, then we use that.
-Otherwise, we create a new blank one. If we already have a rede in this
-rung, then we use that; otherwise, we create a blank one. An important
-point to note here is that we let `ref` in the rede be `[~ *rind]`.
-Recall that for domestic desks `ref` is null. We use this to distinguish
-between foreign and domestic desks in `++de`.
-
-With this information, we create a `++de` core as usual.
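-
-Before revisiting those arms, it may help to see in one place the
-bookkeeping that `ref` holds for a foreign desk. The following Python
-sketch is a conceptual model only (the class and method names are made
-up; the field names mirror `nix`, `bom`, and `fod` from `++rind`):
-
-    # Conceptual model of the ++rind bookkeeping, not the Hoon itself.
-    class RequestManager:
-        def __init__(self):
-            self.nix = 0      # next available request sequence number
-            self.bom = {}     # sequence number -> (duct, rave) outstanding
-            self.fod = {}     # duct -> sequence number
-
-        def add(self, duct, rave):
-            """Record an outgoing request, as ++duce does for foreign desks."""
-            inx = self.nix
-            self.bom[inx] = (duct, rave)
-            self.fod[duct] = inx
-            self.nix += 1
-            return inx        # sent to the other ship along with the rave
-
-        def cancel(self, duct):
-            """Forget a request, as ++ease does for foreign desks."""
-            inx = self.fod.pop(duct, None)
-            if inx is not None:
-                self.bom.pop(inx, None)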
- -Although we've already covered `++ease` and `++eave`, we'll go through -them quickly again, highlighting the case of foreign request. - - ++ ease :: release request - |= hen=duct - ^+ +> - ?~ ref +> - =+ rov=(~(got by qyx) hen) - =. qyx (~(del by qyx) hen) - (mabe rov (cury best hen)) - =. qyx (~(del by qyx) hen) - |- ^+ +>+.$ - =+ nux=(~(get by fod.u.ref) hen) - ?~ nux +>+.$ - %= +>+.$ - say [[hen [(scot %ud u.nux) ~] for [u.nux syd ~]] say] - fod.u.ref (~(del by fod.u.ref) hen) - bom.u.ref (~(del by bom.u.ref) u.nux) - == - -Here, we still remove the duct from our cult (we maintain a cult even -for foreign desks), but we also need to tell the foreign desk to cancel -our subscription. We do this by sending a request (by appending to -`say`, which gets resolved in `++abet:de` to a `%want` kiss to ames) to -the foreign ship to cancel the subscription. Since we don't anymore -expect a response on this duct, we remove it from `fod` and `bom`, which -are the maps between ducts, raves, and request sequence numbers. -Basically, we remove every trace of the subscription from our request -manager. - -In the case of `++eave`, where we're creating a new request, everything -is exactly identical to the case of the local request except `++duce`. -We said that `++duce` simply puts the request into our cult. This is -true for a domestic request, but distinctly untrue for foreign requests. - - ++ duce :: produce request - |= [hen=duct rov=rove] - ^+ +> - =. qyx (~(put by qyx) hen rov) - ?~ ref +>.$ - |- ^+ +>+.$ :: XX why? - =+ rav=(reve rov) - =+ ^= vaw ^- rave - ?. ?=([%& %v *] rav) rav - [%| [%ud let.dom] `case`q.p.rav r.p.rav] - =+ inx=nix.u.ref - %= +>+.$ - say [[hen [(scot %ud inx) ~] for [inx syd ~ vaw]] say] - nix.u.ref +(nix.u.ref) - bom.u.ref (~(put by bom.u.ref) inx [hen vaw]) - fod.u.ref (~(put by fod.u.ref) hen inx) - == - -If we have a request manager (i.e. this is a foreign desk), then we do -the approximate inverse of `++ease`. We create a rave out of the given -request and send it off to the foreign desk by putting it in `say`. Note -that the rave is created to request the information starting at the next -revision number. Since this is a new request, we put it into `fod` and -`bom` to associate the request with its duct and its sequence number. -Since we're using another sequence number, we must increment `nix`, -which represents the next available sequence number. - -And that's really it for this side of the request. Requesting foreign -information isn't that hard. Let's see what it looks like on the other -side. When we get a request from another ship for information on our -ship, that comes to us in the form of a `%wart` from ames. - -We handle a `%wart` in `++call`, right next to where we handle the -`%warp` case. - - %wart - ?> ?=(%re q.q.hic) - =+ ryf=((hard riff) s.q.hic) - :_ ..^$ - :~ :- hen - :^ %pass [(scot %p p.p.q.hic) (scot %p q.p.q.hic) r.q.hic] - %c - [%warp [p.p.q.hic p.p.q.hic] ryf] - == - -Every request we receive should be of type `riff`, so we coerce it into -that type. We just convert this into a new `%warp` kiss that we pass to -ourself. This gets handled like normal, as a local request. When the -request produces a value, it does so like normal as a `%writ`, which is -returned to `++take` along the path we just sent it on. 
- - %writ - ?> ?=([@ @ *] tea) - =+ our=(need (slaw %p i.tea)) - =+ him=(need (slaw %p i.t.tea)) - :_ ..^$ - :~ :- hen - [%pass ~ %a [%want [our him] [%r %re %c t.t.tea] p.+.q.hin]] - == - -Since we encoded the ship we need to respond to in the path, we can just -pass our `%want` back to ames, so that we tell the requesting ship about -the new data. - -This comes back to the original ship as a `%waft` from ames, which comes -into `++take`, right next to where we handled `%writ`. - - %waft - ?> ?=([@ @ ~] tea) - =+ syd=(need (slaw %tas i.tea)) - =+ inx=(need (slaw %ud i.t.tea)) - =+ ^= zat - =< wake - (knit:(do now p.+.q.hin syd ruf) [inx ((hard riot) q.+.q.hin)]) - =^ mos ruf - =+ zot=abet.zat - [-.zot (posh q.p.+.q.hin syd +.zot ruf)] - [mos ..^$(ran.ruf ran.zat)] :: merge in new obj - -This gets the desk and sequence number from the path the request was -sent over. This determines exactly which request is being responded to. -We call `++knit:de` to apply the changes to our local desk, and we call -`++wake` to update our subscribers. Then we call `++abet:de` and -`++posh` as normal (like in `++eave`). - -We'll examine `++knit` and `++wake`, in that order. - - ++ knit :: external change - |= [inx=@ud rot=riot] - ^+ +> - ?> ?=(^ ref) - |- ^+ +>+.$ - =+ ruv=(~(get by bom.u.ref) inx) - ?~ ruv +>+.$ - => ?. |(?=(~ rot) ?=(& -.q.u.ruv)) . - %_ . - bom.u.ref (~(del by bom.u.ref) inx) - fod.u.ref (~(del by fod.u.ref) p.u.ruv) - == - ?~ rot - =+ rav=`rave`q.u.ruv - %= +>+.$ - lim - ?.(&(?=(| -.rav) ?=(%da -.q.p.rav)) lim `@da`p.q.p.rav) - :: - haw.u.ref - ?. ?=(& -.rav) haw.u.ref - (~(put by haw.u.ref) p.rav ~) - == - ?< ?=(%v p.p.u.rot) - =. haw.u.ref - (~(put by haw.u.ref) [p.p.u.rot q.p.u.rot q.u.rot] ~ r.u.rot) - ?. ?=(%w p.p.u.rot) +>+.$ - |- ^+ +>+.^$ - =+ nez=[%w [%ud let.dom] ~] - =+ nex=(~(get by haw.u.ref) nez) - ?~ nex +>+.^$ - ?~ u.nex +>+.^$ :: should never happen - =. +>+.^$ =+ roo=(edis ((hard nako) u.u.nex)) - ?>(?=(^ ref.roo) roo) - %= $ - haw.u.ref (~(del by haw.u.ref) nez) - == - -This is kind of a long gate, but don't worry, it's not bad at all. - -First, we assert that we're not a domestic desk. That wouldn't make any -sense at all. - -Since we have the sequence number of the request, we can get the duct -and rave from `bom` in our request manager. If we didn't actually -request this data (or the request was canceled before we got it), then -we do nothing. - -Else, we remove the request from `bom` and `fod` unless this was a -subscription request and we didn't receive a null riot (which would -indicate the last message on the subscription). - -Now, if we received a null riot, then if this was a subscription request -by date, then we update `lim` in our request manager (representing the -latest time at which we have complete information for this desk) to the -date that was requested. If this was a single read request, then we put -the result in our simple cache `haw` to make future requests faster. -Then we're done. - -If we received actual data, then we put it into our cache `haw`. Unless -it's a `%w` request, we're done. - -If it is a `%w` request, then we try to get the `%w` at our current head -from the cache. In general, that should be the thing we just put in a -moment ago, but that is not guaranteed. The most common case where this -is different is when we receive desk updates out of order. At any rate, -since we now have new information, we need to apply it to our local copy -of the desk. 
We do so in `++edis`, and then we remove the stuff we just -applied from the cache, since it's not really a true "single read", like -what should really be in the cache. - - ++ edis :: apply subscription - |= nak=nako - ^+ +> - %= +> - hit.dom (~(uni by hit.dom) gar.nak) - let.dom let.nak - lat.ran %+ roll (~(tap in bar.nak) ~) - =< .(yeb lat.ran) - |= [sar=blob yeb=(map lobe blob)] - =+ zax=(blob-to-lobe sar) - %+ ~(put by yeb) zax sar - hut.ran %+ roll (~(tap in lar.nak) ~) - =< .(yeb hut.ran) - |= [sar=yaki yeb=(map tako yaki)] - %+ ~(put by yeb) r.sar sar - == - -This shows, of course, exactly why nako is defined the way it is. To -become completely up to date with the foreign desk, we need to merge -`hit` with the foreign one so that we have all the revision numbers. We -update `let` so that we know which revision is the head. - -We merge the new blobs in `lat`, keying them by their hash, which we get -from a call to `++blob-to-lobe`. Recall that the hash is stored in the -actual blob itself. We do the same thing to the new yakis, putting them -in `hut`, keyed by their hash. - -Now, our local dome should be exactly the same as the foreign one. - -This concludes our discussion of `++knit`. Now the changes have been -applied to our local copy of the desk, and we just need to update our -subscribers. We do so in `++wake:de`. - - ++ wake :: update subscribers - ^+ . - =+ xiq=(~(tap by qyx) ~) - =| xaq=(list ,[p=duct q=rove]) - |- ^+ ..wake - ?~ xiq - ..wake(qyx (~(gas by *cult) xaq)) - ?- -.q.i.xiq - & - =+ cas=?~(ref ~ (~(get by haw.u.ref) `mood`p.q.i.xiq)) - ?^ cas - %= $ - xiq t.xiq - ..wake ?~ u.cas (blub p.i.xiq) - (blab p.i.xiq p.q.i.xiq u.u.cas) - == - =+ nao=(~(case-to-aeon ze lim dom ran) q.p.q.i.xiq) - ?~ nao $(xiq t.xiq, xaq [i.xiq xaq]) - $(xiq t.xiq, ..wake (balk p.i.xiq u.nao p.q.i.xiq)) - :: - | - =+ mot=`moot`p.q.i.xiq - =+ nab=(~(case-to-aeon ze lim dom ran) p.mot) - ?~ nab - $(xiq t.xiq, xaq [i.xiq xaq]) - =+ huy=(~(case-to-aeon ze lim dom ran) q.mot) - ?~ huy - =+ ptr=[%ud +(let.dom)] - %= $ - xiq t.xiq - xaq [[p.i.xiq [%| ptr q.mot r.mot s.mot]] xaq] - ..wake =+ ^= ear - (~(lobes-at-path ze lim dom ran) let.dom r.p.q.i.xiq) - ?: =(s.p.q.i.xiq ear) ..wake - =+ fud=(~(make-nako ze lim dom ran) u.nab let.dom) - (bleb p.i.xiq let.dom fud) - == - %= $ - xiq t.xiq - ..wake =- (blub:- p.i.xiq) - =+ ^= ear - (~(lobes-at-path ze lim dom ran) u.huy r.p.q.i.xiq) - ?: =(s.p.q.i.xiq ear) (blub p.i.xiq) - =+ fud=(~(make-nako ze lim dom ran) u.nab u.huy) - (bleb p.i.xiq +(u.nab) fud) - == - == - -- - -This is even longer than `++knit`, but it's pretty similar to `++eave`. -We loop through each of our subscribers `xiq`, processing each in turn. -When we're done, we just put the remaining subscribers back in our -subscriber list. - -If the subscriber is a single read, then, if this is a foreign desk -(note that `++wake` is called from other arms, and not only on foreign -desks). Obviously, if we find an identical request there, then we can -produce the result immediately. Referential transparency for the win. We -produce the result with a call to `++blab`. If this is a foreign desk -but the result is not in the cache, then we produce `++blub` (canceled -subscription with no data) for reasons entirely opaque to me. Seriously, -it seems like we should wait until we get an actual response to the -request. If someone figures out why this is, let me know. At any rate, -it seems to work. - -If this is a domestic desk, then we check to see if the case exists yet. 
-If it doesn't, then we simply move on to the next subscriber, consing -this one onto `xaq` so that we can check again the next time around. If -it does exist, then we call `++balk` to fulfill the request and produce -it. - -`++balk` is very simple, so we'll describe it here before we get to the -subscription case. - - ++ balk :: read and send - |= [hen=duct oan=@ud mun=mood] - ^+ +> - =+ vid=(~(read-at-aeon ze lim dom ran) oan mun) - ?~ vid (blub hen) (blab hen mun u.vid) - -We call `++read-at-aeon` on the given request and aeon. If you recall, -this processes a `mood` at a particular aeon and produces the result, if -there is one. If there is data at the requested location, then we -produce it with `++blab`. Else, we call `++blub` to notify the -subscriber that no data can ever come over this subscriptioin since it -is now impossible for there to ever be data for the given request. -Because referential transparency. - -At any rate, back to `++wake`. If the given rave is a subscription -request, then we proceed similarly to how we do in `++eave`. We first -try to get the aeon referred to by the starting case. If it doesn't -exist yet, then we can't do anything interesting with this subscription, -so we move on to the next one. - -Otherwise, we try to get the aeon referred to by the ending case. If it -doesn't exist yet, then we produce all the information we can. We call -`++lobes-at-path` at the given aeon and path to see if the requested -path has actually changed. If it hasn't, then we don't produce anything; -else, we produce the correct nako by calling `++bleb` on the result of -`++make-nako`, as in `++eave`. At any rate, we move on to the next -subscription, putting back into our cult the current subscription with a -new start case of the next aeon after the present. - -If the aeon referred to by the ending case does exist, then we drop this -subscriber from our cult and satisfy its request immediately. This is -the same as before -- we check to see if the data at the path has -actually changed, producing it if it has; else, we call `++blub` since -no more data can be produced over this subscription. - -This concludes our discussion of foreign requests. diff --git a/pub/doc/arvo/dill.md b/pub/doc/arvo/dill.md deleted file mode 100644 index 01c915a19f..0000000000 --- a/pub/doc/arvo/dill.md +++ /dev/null @@ -1,27 +0,0 @@ -
- -`%dill` -======= - -Our terminal driver. - -Unix sends keyboard events to `%dill` from either the console or telnet, -and `%dill` produces terminal output. The only app that should directly -talk to `%dill` is the terminal app. Command-line apps are run by, -receive input from, and produce output to, the `%shell` app, which is -controlled by `%terminal`, which talks to `%dill`, which talks to unix. -Clay also uses `%dill` directly to print out the filesystem change -events, but this is questionable behavior. - -`%dill` has two main components. First, it controls the terminal on a -very basic, event-by-event level. This includes keeping track of things -like the dimensions of the terminal, the prompt type (plain text or -password), which duct to produce effects on, and so forth. Second, it -handles terminal events, keystroke by keystroke. Most characters are -simply pushed onto the buffer and blitted to the screen, but some -characters, including control-modified keys, arrow keys, etc. require -special handling. Most of the readline functionality is in `%dill`. - -
- ------------------------------------------------------------------------- diff --git a/pub/doc/arvo/dill/commentary.md b/pub/doc/arvo/dill/commentary.md deleted file mode 100644 index 76264f05f6..0000000000 --- a/pub/doc/arvo/dill/commentary.md +++ /dev/null @@ -1,5 +0,0 @@ -Dill: Reference -=============== - -Dill: Commentary -================ diff --git a/pub/doc/arvo/eyre.md b/pub/doc/arvo/eyre.md deleted file mode 100644 index ec11c90d74..0000000000 --- a/pub/doc/arvo/eyre.md +++ /dev/null @@ -1,63 +0,0 @@ -
- -`%eyre` -======= - -Our http server. - -Unix sends http messages to `%eyre`, and `%eyre` produces http messages -in response. In general, apps and vanes do not call `%eyre`; rather, -`%eyre` calls apps and vanes. `%eyre` uses `%ford` and `%gall` to -functionally publish pages and facilitate communication with apps. - -`%eyre` primarily parses web requests and handles them in a variety of -ways, depending on the control string. Nearly all of these are -essentially stateless, like functional publishing with `%ford`. -Additionally, there's a fairly significant component that handles -`%gall` messaging and subscriptions, which must be stateful. - -
-
------------------------------------------------------------------------
-
-HTTP Methods
-============
-
-`GET`
-
-- `gog` `https://[ship-name].urbit.org/gog/[service]`
-  Owner requesting a page on her own Urbit.
-- `gig` `https://[ship-name].urbit.org/gig/[user-name]/[service]`
-  Another user requesting a page on a foreign Urbit.
-- `goe` `https://[ship-name].urbit.org/goe/[service]/[port]/[stream]/[sequence]`
-  `https://[ship-name].urbit.org/goe/[service]/[port]/[stream]/[sequence].json`
-  Pulls a specific response to her subscription on her own Urbit.
-- `gie` `https://[ship-name].urbit.org/gie/[user-name]/[service]/[port]/[stream]/[sequence]`
-  `https://[ship-name].urbit.org/gie/[user-name]/[service]/[port]/[stream]/[sequence].json`
-  Pulls a specific response to her subscription on a foreign Urbit.
-
-`PUT`
-
-- `tos` `https://[ship-name].urbit.org/tos/[service]/[port]/[stream]/[path]`
-  `{oryx: [string]}` Initiate a subscription on her own Urbit.
-- `tis` `https://[ship-name].urbit.org/tis/[user-name]/[service]/[port]/[stream]/[path]`
-  `{oryx: [string]}` Initiate a subscription on a foreign Urbit.
-- `tom` `https://[ship-name].urbit.org/tom/[service]/[port]/[sequence]`
-  `{oryx: [string], xyro: [json]}` Send a message to her Urbit with sequence number `[sequence]`.
-- `tim` `https://[ship-name].urbit.org/tim/[user-name]/[service]/[port]/[sequence]`
-  `{oryx: [string], xyro: [json]}` Send a message to a foreign Urbit with sequence number `[sequence]`.
-- `tou` `https://[ship-name].urbit.org/tou/[service]/[port]/[stream]`
-  Unsubscribe from stream `[stream]` on her Urbit.
-- `tiu` `https://[ship-name].urbit.org/tiu/[user-name]/[service]/[port]/[stream]`
-  Unsubscribe from stream `[stream]` on a foreign Urbit.
-
-urb.js
-======
-
-
-
-
diff --git a/pub/doc/arvo/eyre/commentary.md b/pub/doc/arvo/eyre/commentary.md
deleted file mode 100644
index 3997127ac5..0000000000
--- a/pub/doc/arvo/eyre/commentary.md
+++ /dev/null
@@ -1,273 +0,0 @@
-Eyre: Reference
-===============
-
-## State
-
-Stored
-
-- `++bolo`
-- `++cyst`
-- `++stem`
-
-Runtime
-
-- `perk`
-  + `perk-auth`
-- `pest`
-- `even`
-
-## Cores
-
-- `++handle`
-  + `++parse`
-    - `++as...`
-- `++ya`
-- `++ix`
-
-Eyre: Commentary
-================
-
-Let us follow the loading of a simple cli app, as it bounces from
-browser to server to browser and back.
-
-## Initial request[#init]
-
-An http request for `http://sampel-sipnym.urbit.org/cli` will be [redirected](dns)
-to the `%eyre` on ~sampel-sipnym, and come in as a `%this` kiss.
-
-From arvo, requests enter `++call` and, after some type reification, are passed
-along to `++apex:ye`. In the case of a `%this` kiss, its components are
-parsed (see `++zest:epur`, `++eat-headers`) and handed off to `++handle`, wrapped
-in `++emule` to produce a `++fail` page in case of error. `++apex:handle` will
-`++process` the request to a `pest` or a `++done` core, and in the former case
-`++resolve` the pest into an outgoing card.
-
-XX it also seems to affect the current ship, test that serving ship name is consistently correct
-
-The pest is produced by `++process`, which will first further `++parse` the
-request, and if this does not make the response immediately obvious,
-`++process-parsed` the resulting `perk`.
-
-`++parse` produces the `perk`, by attempting to interpret the `pork` (url path)
-[`++as-magic-filename`](#mage), `++as-beam`, and `++as-aux-request`. In this
-case, `/cli` is parsed by the second case as a `%beam` query to `/=cli=`: a path
-which starts with a valid ship name is expected to be a full clay (well, ford)
-path, and one starting with a term implies the current serving ship and a case
-of `0`, the current revision.
-
-XX spur: when the desks are merged, `/cli` shall point to `/=main=/pub/cli`
-
-The parsed `perk` generates a `%f %boil` note, `mark`ed as its extension (here
-defaulting to `%urb`) and `wire`d with `~` to return unaltered to the client. It
-goes on to `++resolve` by being passed to `++ford-get-beam`, which translates
-the perk into a `%boil` `++ford-req`, adding an `++fcgi` path-segment
-containing query string and `++fcgi-cred:for-client` auth information.
-
-`%ford`s translation of `/=cli=/hymn/hook` to a self-refreshing `%urb` html page
-[deserves its own commentary](../ford/commentary), but we resume in `%eyre`
-when the `%made` sign arrives in `++take`, and soon after `++axon:ye`. There the
-`wire`, or rather the `whir` it has been verified to be, determines that the
-response should be served immediately. However, as the mark is not `%mime`,
-another trip to `%ford` is required to encode it, on the same wire; afterwards,
-the value of the `%mime` cage is verified to be of the correct type, and finally
-delivered back up the requesting duct as a successful `%thou` HTTP response.
-
-XX `%cast %mime` used to be in ford-get-beam, is there a reason it was removed?
-
-## Back into the breach, or: auxiliary requests
-
-Now, it was mentioned that this result is self-refreshing: the `%urb`
-translation door injects a `;script@"/~/on/{deps}.js"` into every page, where
-`deps` is a ford-readable hash of the set of resources that page construction
-depended on.
-
-This triggers another `%this` request.
Its handling is identical to that of
-`/cli` up until `++parse`, where it is seen not `++as-beam` but
-`++as-aux-request` (auxiliary requests starting with `/~/` or `/~~/`).
-`/on/[hash]` is a long-`%poll`, which `++process-parsed`, for a `.js` mark,
-answers with a direct `%js`. Its contents are the static `++poll:js`, which
-initiates the long-polling loop, run against an injected `urb.js` of
-`{poll:[hash]}`.
-
-A `%js` `pest` is `resolve`d as a `text/javascript` success `%this`.
-
-When `poll.js` is received by the client, it opens an `XMLHttpRequest` for
-`/~/on/{window.urb.poll}.json`, bringing us back to `%poll:process`.
-
-In the case of a non-`%js` `/~/on/`, `%poll:process-parsed` turns into a
-`++new-dependency`, which stores the listening duct, and `pass-note`s a `%wasp`
-with the deps-hash back to `%ford` whence it came. While this occurred, the page
-has loaded.
-
-Some indeterminate amount of time afterwards, with dropped `/~/on/{...}.json`s
-being retried upon expiring and also being stored, a `%news` sign arrives in
-`++axon`, and the hash in question is retrieved from the wire, and the listening
-long-polls retrieved by the hash. Each receives a 205 "Reload parent view" HTTP
-response, which `poll.js` dutifully executes, and the markdown, its typo now
-fixed, is rendered.
-
-## Authentication
-
-Now, while this accurately reflects the presentation of e.g. a markdown file,
-`/cli` is an application front-end, and one that permits only owner access. Its
-second script is `@"/~~/~/at/main/lib/urb.js"`, semantically equivalent to
-`/~/as/own/~/at/main/lib/urb.js`, and handled as follows.
-
-In `++as-aux-request`, `%as %own` becomes an `%auth %get our` perk, which
-`++process` passes to `++process-parsed`, which passes it on to
-`++process-auth`. There, a `yac` "ya" core is built `++for-client`: a
-`++cookie-prefix`, which is just the serving ship name, is used to get a
-`++session-from-cookies`, here nil as the client has no cookie set. In lieu of
-a cookie, a `++new-ya` is constructed, with a random token `hole` and a
-`++new-cyst` which fills out `cyst` session state from request data.
-
-Returning to `++process-auth`, `%get` checks if the yac is authenticated with
-the requested credentials (`anon` requests are always granted), which for the
-fresh new `cyst` is not the case (more on success [later](#auth-ok)). Unless
-authenticating as a [foreign ship](#xeno), the only thing left is to
-`++show-login-page`, which detects that the requested resource is not `%html`,
-and produces a `%red` pest. For `%js`, `%red`irections `++resolve` to
-`++auth-redir:js`, a line of javascript which prepends `/~~` to the url path.
-
-The owner-authenticated main page request similarly ends in `++show-login-page`,
-which for the empty session is an `[%htme ++login-page:xml]`, `resolve`d to
-`++give-html` with a 401 "unauthorized".
-
-The login page shows a simple prompt, and requests `/~/at/auth.js` to handle the
-submission. And so we are, once again, attempting to divine if what we're doing
-makes sense, `++as-aux-request`.
-
-To understand `/~/at`, there will first be a brief diversion to `~/auth.json`.
-`auth.json`, perk `[%auth %json]`, in `++process-auth` serves `++stat-json:ya`,
-containing such information as the serving ship, which identities are associated
-with this session, and `oryx`, a CSRF token. An oryx must be present on all
-stateful requests, in this case logging in. It also saves the new/old
-session using `abet`.
-
-XX explain `ixor` here and not [later](#ixor)?
-
-`/~/at` is an alternate interface, which injects `auth.json` data into the
-requested file. `/~/at/auth.js`, then, is a request for the built-in `auth:js`
-(parsed to and processed from an `[%auth %js ~]` perk), with session data added
-as `window.urb`. And indeed, ``[`%js /~/at/auth]`` is parsed to
-``[%auth at [`%js /auth]``, which in `++process-auth` is re-`process`ed to
-`[%js ++auth:js]`, which is `++resolve`d after an `++add-json` of
-the relevant data. The yac cookies are also passed to `resolve`, which
-`++add-cookies` injects into the `httr`.
-
-It is at this point that we have the first occasion for user input, namely the password.
-
-The `auth:js` script sends a `PUT` request, also to `/~/auth.json`. In `parse`,
-the first nontrivial `++check-oryx` occurs, `++grab-body`ing the request oryx and
-ensuring it is recorded for the session. The request is parsed with `++need-body`
-to a `[%auth %try {password}]` perk. `%get:process-auth` checks it against
-`++load-secret`, upon success updates the session with `++logon:ya`, and
-serves a fresh `auth.json` which reflects the changed `user`. Upon receiving
-this, the page is refreshed to retry the original request.
-
-## Post-authentication: app communication [#auth-ok]
-
-Upon refresh, `/~~/cli` brings us for the third time to `%get:process-auth`, but
-this time the cookie is set, and the `yac` fetched contains the serving ship as
-authenticated. The `++handle` sample is updated to reflect the requesting ship,
-and the `process` continues for the rest of the pork, once again serving the
-ford page.
-
-The `/~/on/[deps].json` poll starts anew, and `/~~/~/at/main/lib/urb.js`, we now
-know, serves the window.urb necessary to make requests, along with the `urb.js`
-standard library, which extends it with a number of wrappers to them and other
-useful functions.
-
----
-
-One of those functions is `urb.bind`, which is used to subscribe to application
-data. Userspace javascript sets `urb.appl` to `/tic`, and binds `lines` to a
-`;pre;` text display, using a callback.
-
-This triggers a `PUT` to `/~/is/{ixor}/cli/lines.json`, where `ixor` is a hash
-of `oryx` that identifies the connection. In `++as-aux-request`, an `%is` is a
-`%subs` subscription update, which for `%put` forwards to
-`++add-subs:ix`, the ix core fetched `++for-view` by hashing the request
-`++oryx-to-ixor`.
-
-[#ixor] A view has all the state associated with a client that must be
-remembered between events. In this case, this is what app/path the request duct
-is associated with; but mainly, `++add-subs:ix` will `pass-note` to `%gall` so
-it `%show`s the data on the path, current and future.
-
-This will immediately (assuming the ship is local) result in a `%nice` by the
-`/cli` app, returning `++nice-json` to `urb.bind`'s second callback
-as `{ok:true}`. The initial `%rush` results also arrive, and in `++axon` are
-converted to json using `++back` (ford `%cast` wrapper), and when `%made` arrives
-are passed to `++get-rush:ix`. There the source application/path are decoded by
-duct, and then the full event goes to `++get-even`; `++add-even` inserts it into
-the queue, and as there is no long poll it simply stays there.
-
-Upon receipt, the client realizes the long-poll isn't actually running, so that
-is started using `urb.poll`.
At `/~/of/{ixor}`, perk
-`[%view ixor ~ {sequence-number}]`, it is `process`ed by `++poll:ix` (the cyst
-is retrieved by `++ire-ix` from global state, using the perk `ixor`): the
-sequence number is in the past, so the previously received `%rush` is handed to
-`++give-even`. After deleting the previous message in the queue and invoking
-`++pass-took` to notify `%gall` of this occurrence, the data is annotated with
-the source app+path by `++subs-to-json`, and returned to the polling duct.
-
-On the client, the user callback receives the `/cli` history, and displays it on
-the page. The `/~/of` long poll is continued, this time reaching `++poll:ix`
-with the "pending" sequence number, and being stored in the `cyst` for its troubles.
-
----
-
-Its next update proceeds identically, but first it must be triggered, which
-happens when the user enters "(add 2 2)\n", firing an `urb.send` from the event
-handler. This sends a `POST` request to `/~/to/cli/json.json`, perk `%mess`,
-`++process`ed to a `%g %mess`. Were the mark not `%json`, a `%ford` conversion
-would occur first, and `%made:axon` would send the gall message proper. In
-either case, eventually a `%mean` or `%nice` arrives, is encoded as json, and
-sent to the client callback.
-
-## A path not taken: magic filenames [#mage]
-
-The `/robots.txt` and `/favicon.(ico|png)` files are static, and served
-immediately when caught by a `++parse`.
-
-XX index.html?
-
-## A path not taken: foreign auth [#xeno]
-
-While this example details a login `/~/as/own`, it is possible to be
-authenticated as any ship on the network. A request for such, seen in
-`%get:process-auth`, is passed to `++foreign-auth:ya`, which sends an
-`%ames /lon` message to the ship in question. The foreign ship stores the
-inquiry, calculates (the local) `++our-host` and responds with a `/hat`,
-containing the redirection host, which is stored by `++foreign-hat`; it is later
-used to send the client to a `/~/am` url on the foreign client, which acts as a
-normal login page but later sends the client back. XX expand, basically the
-status quo is you're logged in and `/~/as/foo` is ignored, just setting your
-`urb.user` XX
-
-
-## A path not taken: deauthentication
-
-`/~/away`, perk `[%away ~]`, produces a static `++logout-page:xml`, which also
-uses `/~/at/auth.js`, to send a `DELETE /~/auth.json`, perk `[%auth %del]`. This
-executes `++logoff:ya` for the cookie session, resolving to `++abut` to wipe it
-from memory.
-
-## A path not taken: unsubscription
-
-`DELETE /~/is/app/path/within` works much like `PUT /~/is/app/path/within`, with
-`++del-subs:ix` acting as the reverse of `++add-subs`: it deletes the duct
-binding and sends `%g %nuke`.
-
-
-XX unmentioned arms: abet, add-poll, adit, ames-gram, anon, ares-to-json, bolo, cyst, doze, even, ford-kill, get-mean, gift, give-json, give-thou, gram, hapt, hasp, host-to-ship, ix, ixor, js, kiss, load, mean-json, move, note, pass-note, perk, perk-auth, pest, poke-test, print-subs, render-tang, resp, root-beak, scry, ses-authed, ses-ya, sign, silk, sine, stay, stem, teba, titl, to-oryx, urb, wait-era, wake, whir, wush, xml, ya, ye
-
-## Appendix A: DNS [#dns]
-
-The `*.urbit.org` domain can be used to access destroyers and cruisers. In the
-common case of hosted ships, this is done by dynamic DNS directly to the hosting
-instance. We do not speak of the uncommon case.
When ports are blocked and -infrastructure crumbles around you, only imported martian networking can be -trusted: the `%get` and `%got` [gram]()s are used to proxy [`%this` requests]() and -[`%thou` responses]() respectively. diff --git a/pub/doc/arvo/ford.md b/pub/doc/arvo/ford.md deleted file mode 100644 index c258b08d36..0000000000 --- a/pub/doc/arvo/ford.md +++ /dev/null @@ -1,128 +0,0 @@ -
-
-`%ford`
-=======
-
-Our typed and marked computation engine.
-
-A variety of different services are provided by `%ford`, but they mostly
-involve compiling hook files, slapping/slamming code with marked data,
-and converting data between marks, including validating data to a mark.
-Throughout every computation, `%ford` keeps track of which resources are
-dependencies so that the client may be aware when one or more
-dependencies are updated.
-
-`%ford` neither accepts unix events nor produces effects. It exists
-entirely for the benefit of applications and other vanes, in particular
-`%gall`. `%eyre` exposes the functional publishing aspects of `%ford`
-while `%gall` uses `%ford` to control the execution of applications.
-`%clay` is intended to use `%ford` to manage marked data, but this is
-not yet reality.
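To make the request shape concrete before the individual silks are described under Cards below, here is a minimal sketch of such a request; it is not taken from the source, and `bem` simply stands for whatever beam points at the file to be built:

    ::  schematic only: ask %ford, on behalf of ship `our`, to render the
    ::  file at beam `bem` with the %urb mark and then convert the result
    ::  to %mime.  the silk is wrapped in a unit; passing ~ instead would
    ::  cancel the build outstanding on this duct.
    [%exec our `[%cast %mime [%boil %urb bem /]]]

This is essentially the pair of steps `%eyre` takes when it functionally publishes a page: a `%boil` to produce the page, followed by a `%cast` to `%mime` so the result can go out as an http response.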
- ------------------------------------------------------------------------- - - - ------------------------------------------------------------------------- - -Cards -===== - -`%ford` accepts just one card, `%exec`. This is misleading, however, -since there are fourteen different `silk`s that may be used with it. In -every case, the expected response to a `%exec` card is a `%made` gift -with either an error or the produced result along with its set of -dependencies. - -Silks may autocons, so that the product of a cell of silks is a cell of -the product of the two silks. - -`%bake` -================ - -Tries to functionally produce the file at a given beam with the given -mark and heel. It fails if there is no way to translate at this level. - -`%boil` -================ - -Functionally produces the file at a given beam with the given mark and -heel. If there is no way to translate at this beam, we pop levels off -the stack and attempt to bake there until we find a level we can bake. -This should almost always be called instead of `%bake`. - -`%call` -================ - -Slams the result of one silk against the result of another. - -`%cast` -================ - -Translates the given silk to the given mark, if possible. This is one of -the critical and fundamental operations of ford. - -`%diff` -================ - -Diffs the two given silks (which must be of the same mark), producing a -cage of the mark specified in `++mark` in `++grad` for the mark of the -two silks. - -`%done` -================ - -Produces exactly its input. This is rarely used on its own, but many -silks are recursively defined in terms of other silks, so we often need -a silk that simply produces its input. A monadic return, if you will. - -`%dude` -================ - -Computes the given silk with the given tank as part of the stack trace -if there is an error. - -`%dune` -================ - -Produces an error if the cage is empty. Otherwise, it produces the value -in the unit. - -`%mute` -================ - -Takes a silk and a list of changes to make to the silk. At each wing in -the list we put the value of the associated silk. - -`%pact` -================ - -Applies the second silk as a patch to the first silk. The second silk -must be of the mark specified in `++mark` in `++grad` for the mark of -the first silk. - -`%plan` -================ - -Performs a structured assembly directly. This is not generally directly -useful because several other silks perform supersets of this -functionality. We don't usually have naked hoods outside ford. - -`%reef` -================ - -Produces a core containing the entirety of zuse and hoon, suitable for -running arbitrary code against. The mark is `%noun`. - -`%ride` -================ - -Slaps a twig against a subject silk. The mark of the result is `%noun`. - -`%vale` -================ - -Validates untyped data from a ship against a given mark. This is an -extremely useful function. diff --git a/pub/doc/arvo/ford/commentary.md b/pub/doc/arvo/ford/commentary.md deleted file mode 100644 index f87d778c6b..0000000000 --- a/pub/doc/arvo/ford/commentary.md +++ /dev/null @@ -1,2571 +0,0 @@ -Reference -========= - -Data Models ------------ - -### `++axle`, formal state - - ++ axle :: all %ford state - $: %1 :: version for update - pol=(map ship baby) :: - == :: - -This is the formal state of our vane. Anything that must be remembered -between calls to ford must be stored here. 
The number `%1` is a version -number for our state that allows us to upgrade the structure of our -state in the future if we wish. - -`pol` is the a map from every ship on our pier to their individual ford -state. There is no shared ford state -- every ship is entirely separate. - -### `++baby`, state by ship - - ++ baby :: state by ship - $: tad=[p=@ud q=(map ,@ud task)] :: tasks by number - dym=(map duct ,@ud) :: duct to task number - jav=(map ,* calx) :: cache - == :: - -This is the state specific to each ship. - -`tad` and `dym` keep track of the tasks we're currently working on. -`dym` is a map from ducts to task numbers, and `q.tad` is a map from -task number to the task itself. `p.tad` is the next available task -number. Thus, the keys of `q.tad` are a subset of the numbers less than -`p.tad`, and ford has attempted exactly `p.tad` tasks so far. - -`jav` is the cache of previously-solved problems. The keys are a pair of -a term (either `%hood`, `%slap`, or `%slam`) and a noun that represents -the exact problem solved. In the case of a `%hood`, then, the key is of -the form `[%hood beam cage]`. For `%slap`, there is `[%slap vase twig]`. -For `%slam`, there is `[%slam vase vase]`. The values are the result of -solving the problem. Note that this cache is wiped in `++stay` when ford -is reloaded. - -### `++task`, problem in progress - - ++ task :: problem in progress - $: nah=duct :: cause - kas=silk :: problem - kig=[p=@ud q=(map ,@ud beam)] :: blocks - == :: - -This is all the state we keep regarding a particular task. `nah` is the -duct which asked us to solve the problem, and `kas` is the most recent -statement of the problem. - -`kig` keeps track of which resources we are blocked on. Our blocks are -stored by index in `q.kig`, and the next available index is `p.kig`. - -### `++silk`, problem - - ++ silk :: construction layer - $& [p=silk q=silk] :: cons - $% [%bake p=mark q=beam r=path] :: local synthesis - [%boil p=mark q=beam r=path] :: general synthesis - [%call p=silk q=silk] :: slam - [%cast p=mark q=silk] :: translate - [%diff p=silk q=silk] :: diff - [%done p=(set beam) q=cage] :: literal - [%dude p=tank q=silk] :: error wrap - [%dune p=(set beam) q=(unit cage)] :: unit literal - [%mute p=silk q=(list (pair wing silk))] :: mutant - [%pact p=silk q=silk] :: patch - [%plan p=beam q=spur r=hood] :: structured assembly - [%reef ~] :: kernel reef - [%ride p=twig q=silk] :: silk thru twig - [%vale p=mark q=ship r=*] :: validate [our his] - == :: - -This is the every type of problem we can solve. Every `%exec` kiss that -requests us to solve a problem must choose one of these problems to -solve. - -Because this is both an internal structure used in ford and the public -interface to ford, we choose to document this structure in our -discussion of the public interface to ford below. - -### `++calx`, cache line - - ++ calx :: concrete cache line - $% [%hood p=calm q=(pair beam cage) r=hood] :: compile - [%slap p=calm q=[p=vase q=twig] r=vase] :: compute - [%slam p=calm q=[p=vase q=vase] r=vase] :: compute - == :: - -There are three kinds of cache entries. Every entry includes some -metadata in `p` and is the combination of an input and its output. - -The input to a `%hood` is the location of the resource and a cage -representing the data at that location. The output is the hood found by -compiling the given cage at the given location. - -The input to a `%slap` is the vase of the subject and the twig of the -formula against which we are slapping the subject. 
The output is the -vase produced by slapping them. - -The input to a `%slam` is the vase of the subject and the vase of the -gate which we are slapping. The output is the vase produced by slamming -them. - -### `++calm`, cache line metadata - - ++ calm :: cache metadata - $: laz=@da :: last accessed - dep=(set beam) :: dependencies - == :: - -Every line in the cache needs to have two pieces of metadata. We must -know the last time this line in the cache was accessed, and we must know -what are the dependencies of this line. - -### `++hood`, assembly components - - ++ hood :: assembly plan - $: zus=@ud :: zuse kelvin - sur=(list hoot) :: structures - lib=(list hoof) :: libraries - fan=(list horn) :: resources - src=(list hoop) :: program - == :: - -When assembling a hook file, we split it into several sections. - -`zus` is the kelvin version of the required zuse. In general, we assume -that any newer (lower-numbered) zuse will retain backward compatibility, -so any newer zuse is also permissible. This number is set with a `/?` at -the beginning of the file. - -`sur` is the set of structures included. These are included with the -`/-` rune. When a structure is included, we look in `/=main=/sur` for -the given structure and we load the gate there. When compiling, all the -included structures are collected into a single core placed in the -subject of the body with a `=>`. - -`lib` is the set of librarires included. These are included with the -`/+` rune. When a library is included, we look in `/=main=/lib` for the -given library and we load the library there. As with structures, all the -included libraries are collected into a single core placed in the -subject of the body with a `=>`. - -`fan` is the set of resources included. These are loaded in many -different ways and may load resources from any location. These are -placed in the subject of the body with a `=~`. - -`src` is the set of twigs or references to twigs in the body of the -program. Generally, each of these will represent a core, but this is not -required. When compiling, these are strung together in a `=~`. - -### `++hoot` - - ++ hoot (pair bean hoof) :: structure gate/core - -A structures may be either a direct gate or a core. These are -syntactically distinguished by adding a `*` to the beginning of the -structure name for a core. The structure itself is a `hoof`. - -### `++hoof` - - ++ hoof (pair term (unit (pair case ship))) :: resource reference - -A hoof, which is either a structure or a library, has a name, and it may -also specify which version of the resource to use and which ship to -retrieve it from. - -### `++horn` - - ++ horn :: resource tree - $% [%ape p=twig] :: /~ twig by hand - [%arg p=twig] :: /$ argument - [%day p=horn] :: /| list by @dr - [%dub p=term q=horn] :: /= apply face - [%fan p=(list horn)] :: /. list - [%for p=path q=horn] :: /, descend - [%hub p=horn] :: /@ list by @ud - [%man p=(map span horn)] :: /* hetero map - [%nap p=horn] :: /% homo map - [%now p=horn] :: /& list by @da - [%saw p=twig q=horn] :: /; operate on - [%see p=beam q=horn] :: /: relative to - [%sic p=tile q=horn] :: /^ cast - [%toy p=mark] :: /mark/ static - == :: - -This is how we represent the static resources hook files can load. The -discussion of their use from a user's perspective is documented -elsewhere (link), so we will here only give a description of the data -structure itself. - -A `%ape` horn is simply a twig that gets evaluated and placed in the -subject. 
- -A `%arg` is a gate that gets evaluated with a sample of our location and -our heel. - -A `%day` is a horn that applies to each of a list of `@dr`-named files -in the directory. - -A `%dub` is a term and horn, where the result of a horn is given the -face of the term. - -A `%fan` is a list of horns, all at the current directory level. - -A `%for` is a path and a horn, where the horn is evaluated relative to -the given path, where the given path is relative to the current -location. - -A `%hub` is a horn that applies to each of a list of `@ud`-named files -in the directory. - -A `%man` is a map of spans to horns where the result is a set of each -horn applied to the current directory given the associated face. - -A `%nap` is a homogenous map where each entry in a directory is handled -with the same horn and is given a face according to its name. - -A `%now` is a horn that applies to each of a list of `@da`-named files -in the directory. - -A `%saw` is a twig and a horn, where the twig operates on the result of -the horn. - -A `%see` is a beam and a horn, where the horn is evaluated at a location -of the given beam. - -A `%sic` is a tile and a horn, where the horn is evaluated and cast to -the type associated with the tile. - -A `%toy` is simply a mark to be baked. - -### `++hoop`, body - - ++ hoop :: source in hood - $% [%& p=twig] :: direct twig - [%| p=beam] :: resource location - == :: - -This is an entry in the body of the hook file. The hoop can either be -defined directly in the given file or it can be a reference to another -file. The second is specified with a `//` rune. - -### `++bolt`, monadic edge - - ++ bolt :: gonadic edge - |* a=$+(* *) :: product clam - $: p=cafe :: cache - $= q :: - $% [%0 p=(set beam) q=a] :: depends/product - [%1 p=(set ,[p=beam q=(list tank)])] :: blocks - [%2 p=(list tank)] :: error - == :: - == :: - -Throughout our computation, we let our result flow through with the set -of dependencies of the value. At various times, we may wish to either -throw an error or declare that the actual result cannot be found until a -particular resource is retrieved. This is a perfect case for a monad, so -here we define a data structure for it. - -At every step, we have a cache, so we store that in `p`. In `q` we store -the data. - -In the case of `%0`, we have the result in `q` and the set of -dependencies in `p`. - -In the case of `%1`, we have a set of dependencies on which we are -blocking. When this happens, we make a call to clay to get the -dependencies, and we proceed with the computation when we receive them. -Technically, we restart the computation, but since every expensive step -is cached, there is no significant performance penalty to doing this. -Referential transparency has its uses. - -In the case of `%2`, we have a hit an error. This gets passed all the -way through to the calling duct. The list of tanks is some description -of what went wrong, often including a stack trace. - -### `++burg`, monadic rule - - ++ burg :: gonadic rule - |* [a=$+(* *) b=$+(* *)] :: from and to - $+([c=cafe d=a] (bolt b)) :: - :: :: - -To operate on bolts, we use `++cope` as our bind operator, and the -functions it works on are of type `burg`. Our functions that operate on -bolts should have a sample of the cache and a value. Their output should -be a bolt of the output value. Then, `++cope` will only call the -function when necessary (in the `%0` case), and it will do so without -the wrapping of a bolt. - -If you understand monads, this is probably fairly obvious. 
Otherwise,
-see the discussion on `++cope` (link).
-
-Public Interface
-----------------
-
-Ford does not export a scry interface, so the only way to interact with
-ford is by sending kisses and receiving gifts. In fact, ford only
-accepts one kiss and gives one gift. This is, of course, misleading
-because ford actually does many different things. It does, however, only
-produce one type of thing -- a result of a computation, which is either
-an error or the value produced along with the set of dependencies
-referenced by it.
-
- ++ kiss :: in request ->$
- $% [%exec p=@p q=(unit silk)] :: make / kill
- == ::
-
-The `%exec` kiss requests ford to perform a computation on behalf of a
-particular ship. `p` is the ship, and `q` is the computation. If `q` is
-null, then we are requesting that ford cancel the computation that is
-currently being run along this duct. Thus, if you wish to cancel a
-computation, you must send the kiss along the same duct as the original
-request.
-
-Otherwise, we ask ford to perform a certain computation, as defined in
-`++silk`. Since all computations produce the same type of result, we
-will discuss that result before we jump into `++silk`.
-
- ++ gift :: out result <-$
- $% [%made p=(each bead (list tank))] :: computed result
- == ::
-
-We give either a `bead`, which is a result, or a list of tanks, which is
-an error message, often including a stack trace.
-
- ++ bead ,[p=(set beam) q=cage] :: computed result
-
-This is a set of dependencies required to compute this value and a cage
-of the result with its associated mark.
-
-There are twelve possible computations defined in `++silk`.
-
- ++ silk :: construction layer
- $& [p=silk q=silk] :: cons
- $% [%bake p=mark q=beam r=path] :: local synthesis
- [%boil p=mark q=beam r=path] :: general synthesis
- [%call p=silk q=silk] :: slam
- [%cast p=mark q=silk] :: translate
- [%done p=(set beam) q=cage] :: literal
- [%dude p=tank q=silk] :: error wrap
- [%dune p=(set beam) q=(unit cage)] :: unit literal
- [%mute p=silk q=(list (pair wing silk))] :: mutant
- [%plan p=beam q=spur r=hood] :: structured assembly
- [%reef ~] :: kernel reef
- [%ride p=twig q=silk] :: silk thru twig
- [%vale p=mark q=ship r=*] :: validate [our his]
- == ::
-
-First, we allow silks to autocons. A cell of silks is also a silk, and
-the product vase is a cell of the products of the two silks. This
-obviously extends to an arbitrary number of silks.
-
-`%bake` tries to functionally produce the file at a given beam with the
-given mark and heel. It fails if there is no way to translate at this
-level.
-
-`%boil` functionally produces the file at a given beam with the given
-mark and heel. If there is no way to translate at this beam, we pop
-levels off the stack and attempt to bake there until we find a level we
-can bake. This should almost always be called instead of `%bake`.
-
-`%call` slams the result of one silk against the result of another.
-
-`%cast` translates the given silk to the given mark, if possible. This
-is one of the critical and fundamental operations of ford.
-
-`%done` produces exactly its input. This is rarely used on its own, but
-many silks are recursively defined in terms of other silks, so we often
-need a silk that simply produces its input. A monadic return, if you
-will.
-
-`%diff` diffs the two given silks (which must be of the same mark),
-producing a cage of the mark specified in `++mark` in `++grad` for the
-mark of the two silks.
-
-`%dude` computes the given silk with the given tank as part of the stack
-trace if there is an error.
-
-`%dune` produces an error if the unit is empty. Otherwise, it produces
-the value in the unit.
-
-`%mute` takes a silk and a list of changes to make to the silk. At each
-wing in the list we put the value of the associated silk.
-
-`%plan` performs a structured assembly directly. This is not generally
-directly useful because several other silks perform supersets of this
-functionality. We don't usually have naked hoods outside ford.
-
-`%reef` produces a core containing the entirety of zuse and hoon,
-suitable for running arbitrary code against. The mark is `%noun`.
-
-`%ride` slaps a twig against a subject silk. The mark of the result is
-`%noun`.
-
-`%vale` validates untyped data from a ship against a given mark. This is
-an extremely useful function.
-
-Commentary
-==========
-
-Parsing Hook Files
-------------------
-
-In the commentary on other vanes, we have traced through the lifecycle
-of various external requests. This is generally a very reasonable order
-to examine vanes since it will eventually cover the entire vane, and we
-are never left wondering why we are doing something.
-
-For ford, however, it makes more sense to begin by discussing the
-parsing and assembling of hook files. Many of the possible requests
-require us to assemble hook files, so we may as well examine this
-immediately.
-
-First, we will examine the parsing. We parse a file at a beam to a hood
-in `++fade:zo:za`. The top-level parsing rule is `++fair`, which takes a
-beam and produces a rule to parse an entire hood file.
-
-A note on the naming scheme: the parsing combinators that parse into
-a particular structure are conventionally given the same name as the
-structure. Although this locally clobbers the type names, this pattern
-makes obvious the intent of the parsing combinators.
-
-We kick off with `++hood:fair`.
-
- ++ hood
- %+ ifix [gay gay]
- ;~ plug
- ;~ pose
- (ifix [;~(plug fas wut gap) gap] dem)
- (easy zuse)
- ==
- ::
- ;~ pose
- (ifix [;~(plug fas hep gap) gap] (most ;~(plug com gaw) hoot))
- (easy ~)
- ==
- ::
- ;~ pose
- (ifix [;~(plug fas lus gap) gap] (most ;~(plug com gaw) hoof))
- (easy ~)
- ==
- ::
- (star ;~(sfix horn gap))
- (most gap hoop)
- ==
-
-There are five sections to a hood: system version, structures,
-libraries, resources, and body.
-
-First, we parse the requested version number of the system. This is
-specified with a unary `/?` rune. If not present, then we default to the
-current version.
-
-Second, we may have zero or more `/-` runes followed by a parsing of a
-`++hoot`, which represents a shared structure.
-
-Third, we may have zero or more `/+` runes followed by a parsing of a
-`++hoof`, which represents a shared library.
-
-Fourth, we may have zero or more other `/` runes (as described in
-`++horn`), which represent program-specific resources to be loaded.
-
-Fifth and finally, we must have one or more body statements (hoops),
-which are either direct twigs or `//` runes.
-
- ++ hoot
- ;~ pose
- (stag %| ;~(pfix tar hoof))
- (stag %& hoof)
- ==
-
-A structure can either be a direct gate, or it can be a simple core.
-Either one is parsed with `++hoof`, so we distinguish the two cases by
-requiring core references to be prefixed by a `*`.
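As a purely illustrative example (the names `foo` and `bar` are hypothetical), a hook file importing one structure of each kind would begin with a line like:

    /-  foo, *bar

Here `foo` is loaded as a direct gate and `bar`, because of the `*`, as a core; both are looked up under `/=main=/sur` as described earlier.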
- - ++ hoof - %+ cook |=(a=^hoof a) - ;~ plug - sym - ;~ pose - %+ stag ~ - ;~(plug ;~(pfix fas case) ;~(pfix ;~(plug fas sig) fed:ag)) - (easy ~) - == - == - -A hoof must have a name, which is a term. Optionally, we also include a -case and a ship. This is marked by appending a `/` followed by a case to -denote the requested version of the resource and a `/` followed by a -ship name to denote the requested source of the resource. For example, -`resource/1/~zod` requests the first version of `resource` on `~zod`. - - ++ case - %- sear - :_ nuck:so - |= a=coin - ?. ?=([%$ ?(%da %ud %tas) *] a) ~ - [~ u=(^case a)] - -Here, we parse a literal with `++nuck:so`, and we accept the input if it -is either an absolute date, an unsigned decimal, or a label. - -This leaves only horns and hoops to parse. Hoops are much simple to -parse, so we'll discuss those first. - - ++ hoop - ;~ pose - (stag %| ;~(pfix ;~(plug fas fas gap) have)) - (stag %& tall:vez) - == - -There are two types of hoops. Direct twigs are parsed with -`++tall:vast`, which is the just the hoon parser for a tall-form twig. - -References to external twigs are marked with a `//` rune followed by a -beam, which is parsed with `++have`. - - ++ hath (sear plex:voz (stag %clsg poor:voz)) :: hood path - ++ have (sear tome ;~(pfix fas hath)) :: hood beam - -`++have` parses a path with `++hath`, and then it converts the path into -a beam with `++tome`. - -`++hath` parses a `/`-separated list with `++poor:vast`, then converts -it to an actual path with `++plex:vast`. - -This leaves only horns to parse. - - ++ horn - =< apex - =| tol=? - |% - ++ apex - %+ knee *^horn |. ~+ - ;~ pfix fas - ;~ pose - (stag %toy ;~(sfix sym fas)) - (stag %ape ;~(pfix sig ape:read)) - (stag %arg ;~(pfix buc ape:read)) - (stag %day ;~(pfix bar day:read)) - (stag %dub ;~(pfix tis dub:read)) - (stag %fan ;~(pfix dot fan:read)) - (stag %for ;~(pfix com for:read)) - (stag %hub ;~(pfix pat day:read)) - (stag %man ;~(pfix tar man:read)) - (stag %nap ;~(pfix cen day:read)) - (stag %now ;~(pfix pam day:read)) - (stag %saw ;~(pfix sem saw:read)) - (stag %see ;~(pfix col see:read)) - (stag %sic ;~(pfix ket sic:read)) - == - == - -Horn parsing is slightly complex, so we create an internal core to -organize our code. Our core has a global variable of `tol`, which is -true if tall form is permissible and false if we're already in wide -form. We kick off the parsing with `++apex`. - -`++apex` specifies how each rune is parsed. This allows us to offload -the different ways of parsing the arguments to these runes into separate -arms. The exception here is that the `%toy` horn is simply of the form -`/mark/`. - -We'll examine each of the horn parsing arms right after we discuss -`++rail`, which is used in each one. - - ++ rail - |* [wid=_rule tal=_rule] - ?. tol wid - ;~(pose wid tal) - -This takes a wide-form and a tall-form parsing rule. If tall form is -permissible, then it allows either rule to match; else, it allows only -the wide form rule. - - ++ read - |% ++ ape - %+ rail - (ifix [sel ser] (stag %cltr (most ace wide:vez))) - ;~(pfix gap tall:vez) - -`++ape:read` parses for both the `/~` and the `/$` runes. It produces a -twig. The wide form is a tuple of one or more ace-separated wide-form -twigs parsed with `++wide:vast` and surrounded by `[` and `]`. The tall -form is a single tall form twig parsed by `++tall:vast` - - ++ day - %+ rail - apex(tol |) - ;~(pfix gap apex) - -This parses for the `/|`, `/@`, `/%`, and `/&` runes. It produces a -horn. 
The wide form is, recursively, the entire horn parser with tall -form disabled. The tall form is a gap followed by, recursively, the -entire horn parser. - - ++ dub - %+ rail - ;~(plug sym ;~(pfix tis apex(tol |))) - ;~(pfix gap ;~(plug sym ;~(pfix gap apex))) - -This parses for the `/=` rune. It produces a term followed by a horn. -The wide form is a symbol name followed by a `=` and, recursively, the -entire horn parser with tall form disabled. The tall form is a gap -followed by a symbol name, another gap, and, recursively, the entire -horn parser. - - ++ fan - %+ rail fail - ;~(sfix (star ;~(pfix gap apex)) ;~(plug gap duz)) - -This parses for the `/.` rune. It produces a list of horns. There is no -wide form. The tall form is a stet-terminated series of gap-separated -recursive calls to the entire horn parser. - - ++ for - %+ rail - ;~(plug (ifix [sel ser] hath) apex(tol |)) - ;~(pfix gap ;~(plug hath ;~(pfix gap apex))) - -This parses for the `/,` rune. It produces a path and a horn. The wide -form is a `[`-`]`-surrounded path followed by, recursively, the entire -horn parser with tall form disabled. The tall form is a gap followed by -a path, another gap, and, recursively, the entire horn parser. - - ++ man - %+ rail fail - %- sear - :_ ;~(sfix (star ;~(pfix gap apex)) ;~(plug gap duz)) - |= fan=(list ^horn) - =| naf=(list (pair term ^horn)) - |- ^- (unit (map term ^horn)) - ?~ fan (some (~(gas by *(map term ^horn)) naf)) - ?. ?=(%dub -.i.fan) ~ - $(fan t.fan, naf [[p.i.fan q.i.fan] naf]) - -This parses for the `/*` rune. It produces a map of spans to horns. -There is no wide form. The tall form is a stet-terminated series of -gap-separated recursive calls to the entire horn parser. All produced -horns are expected to be from `/=` runes. The term and horn in each `/=` -horn is inserted into the produced map as a key-value pair. - - ++ saw - %+ rail - ;~(plug ;~(sfix wide:vez sem) apex(tol |)) - ;~(pfix gap ;~(plug tall:vez ;~(pfix gap apex))) - -This parses for the `/;` rune. It produces a twig and a horn. The wide -form is a wide-form twig followed by a `;` and, recursively, the entire -horn parser with tall form disabled. The tall form is a gap followed by -a tall-form twig, another gap, and, recursively, the entire horn parser. - - ++ see - %+ rail - ;~(plug ;~(sfix have col) apex(tol |)) - ;~(pfix gap ;~(plug have ;~(pfix gap apex))) - -This parses for the `/:` rune. It produces a beam and a horn. The wide -form is a beam followed by a `;` and, recursively, the entire horn -parser with tall form disabled. The tall form is a gap followed by a -beam, another gap, and, recursively, the entire horn parser. - - ++ sic - %+ rail - ;~(plug ;~(sfix toil:vez ket) apex(tol |)) - ;~(pfix gap ;~(plug howl:vez ;~(pfix gap apex))) - -- - -This parses for the `/^` rune. It produces a tile and a horn. The wide -form is a wide-form tile, parsed with `++toil:vast`, followed by a `^` -and, recursively, the entire horn parser with tall form disabled. The -tall form is a gap followed by a tall-form tile, parsed with -`++howl:vast`, another gap, and, recursively, the entire horn parser. - -Assembling Hook Files ---------------------- - -At this point, we've parsed a hook file into a hood. We will now -describe exactly how this hood is assembled into a vase. The problem of -assembling is handled entirely within the `++meow:zo:za` core. 
- - ++ meow :: assemble - |= [how=beam arg=heel] - =| $: rop=(map term (pair hoof twig)) :: structure/complex - zog=(set term) :: structure guard - bil=(map term (pair hoof twig)) :: libraries known - lot=(list term) :: library stack - zeg=(set term) :: library guard - boy=(list twig) :: body stack - hol=? :: horns allowed? - == - |% - -We take two arguments and keep seven pieces of state. `how` is the -location of the hook file we're assembling, and `arg` is the heel, or -virtual path extension, of the file. - -In `rop`, we maintain a map of terms to pairs of hooves and twigs to -represent the structures we've encountered that we will put together in -a core at the top of the file. - -In `zog`, we maintain the set of structures we're in the middle of -loading. If we try to load a structure already in our dependency -ancestry, then we fail because we do not allow circular dependencies. -This enforces that our structure dependency graph is a DAG. - -In `bil`, we maintain a map of terms to pairs of hooves and twigs to -represent the libraries we've encountered that we will put together in a -series of cores after the structure core. - -In `lot`, we maintain a stack of library names as they are encountered -during a depth-first search. More precisely, we push a library onto the -stack after we've processed all its children. Thus, every library -depends only on things deeper in the list. The libraries must be loaded -in the reverse of this order. Concisely, this is a topological sort of -the library dependency partial ordering. - -In `zeg`, we maintain the set of libraries we're in the middle of -loading. If we try to load a library already in our dependency ancestry, -then we fail because we do not allow circular dependencies. This -enforces that our library dependency graph is a DAG. - -In `boy`, we maintain a stack of body twigs, which we'll put together in -a series of cores at the end of the file. - -In `hol`, we decide if we're allowed to contain horns. Libraries and -structures are not allowed to contain horns. - -We in every case enter `++meow` through `++abut`. You'll notice that -there are four (count 'em, four!) calls to `++cope` in `++abut`. If -you've glanced at the ford code in general, you've probably seen cope -over and over. It is called in 79 different places. We need to discuss -the use of this critical function in detail, so we may as well do it -here. - - ++ cope :: bolt along - |* [hoc=(bolt) fun=(burg)] - ?- -.q.hoc - %2 hoc - %1 hoc - %0 =+ nuf=(fun p.hoc q.q.hoc) - :- p=p.nuf - ^= q - ?- -.q.nuf - %2 q.nuf - %1 q.nuf - %0 [%0 p=(grom `_p.q.nuf`p.q.hoc p.q.nuf) q=q.q.nuf] - == == - -In monad-speak, this is the bind operator for the bolt monad. If monads -aren't your thing, don't worry, we're going to explain the use of cope -without further reference to them. - -Recall that there are three different types of bolt. A `%2` error bolt -contains a list of tanks describing the error, a `%1` block bolt -contains a set of resources we're blocked on, and a `%0` value bolt -contains an actual value and the set of its dependencies. - -We most commonly want to perform an operation on the value in a bolt if -it is a `%0` bolt. If it's not a `%0` bolt, we want to leave it alone. -This requires us to write a certain amount of boilerplate between each -of our operations to see if any of them produced a `%1` or a `%2` bolt. -This gets tiresome, so we pull it out into a separate arm and call it -`++cope`. 
- -Intuitively, we're calling the function `fun` with the value in `hoc`, -where `fun` takes an argument of type whatever is the value in a `%0` -case of `hoc`, and it produces a bolt of some (possibly different) type. -For brevity, we will refer to the type of the of the value in the `%0` -case of a bolt as the "type of the bolt". - -If the `hoc` bolt we're given as input to `fun` is already a `%1` or a -`%2` bolt, then we simply produce that. We don't even try to run `fun` -on it. - -Otherwise, we run `fun` with the arguments from the bolt and, if it -produces a `%1` or a `%2` bolt, we simply produce that. If it produces a -`%0` bolt, then we produce that with the old set of dependencies merged -in with the new set. - -We'll see more about how the bolt monad works as we run into more -interesting uses of it. For now, this is sufficient to move on with -`++abut`. - - ++ abut :: generate - |= [cof=cafe hyd=hood] - ^- (bolt vase) - %+ cope (apex cof hyd) - |= [cof=cafe sel=_..abut] - =. ..abut sel - %+ cope (maim cof pit able) - |= [cof=cafe bax=vase] - %+ cope (chap cof bax [%fan fan.hyd]) - |= [cof=cafe gox=vase] - %+ cope (maim cof (slop gox bax) [%tssg (flop boy)]) - |= [cof=cafe fin=vase] - (fine cof fin) - -Our job is simple: we must assemble a hood file into a vase. Hopefully, -the usage of `++cope` is fairly understandable. The correct way to read -this is that it does essentially five things. - -First, we call `++apex` to process the structures, libraries, and body. -This changes our state, so we set our context to the produced context. -Second, we call `++able` to assemble the strucutres and libraries into a -twig, which we slap against zuse with `++maim`. Third, we call `++chap` -to process the resources in the context of the already-loaded structures -and libraries. Fourth, we slap the body against the structures, -libraries, and resources. Fifth and finally, we produce the resultant -vase. - - ++ apex :: build to body - |= [cof=cafe hyd=hood] - ^- (bolt ,_..apex) - ?. |(hol ?=(~ fan.hyd)) - %+ flaw cof :_ ~ :- %leaf - "horns not allowed in structures and libraries: {<[how arg]>}" - %+ cope (body cof src.hyd) - |= [cof=cafe sel=_..apex] - =. ..apex sel - %+ cope (neck cof lib.hyd) - |= [cof=cafe sel=_..apex] - =. ..apex sel(boy boy) - %+ cope (head cof sur.hyd) - |= [cof=cafe sel=_..apex] - (fine cof sel) - -First, we make sure that if we're not allowed to have horns, we don't. -Otherwise, we produce and error with `++flaw`. - - ++ flaw |=([a=cafe b=(list tank)] [p=a q=[%2 p=b]]) :: bolt from error - -This produces a `%2` error bolt from a list of tanks. Fairly trivial. - -We should be starting to get used to the cope syntax, so we can see that -we really only do three things here. We process the body with `++body`, -the libraries with `++neck`, and the structures with `++head`. - - ++ body :: produce functions - |= [cof=cafe src=(list hoop)] - ^- (bolt _..body) - ?~ src (fine cof ..body) - %+ cope (wilt cof i.src) - |= [cof=cafe sel=_..body] - ^$(cof cof, src t.src, ..body sel) - -We must process a list of hoops that represent our body. If there are no -more hoops, we just produce our context in a `%0` bolt with `++fine`. - - ++ fine |* [a=cafe b=*] :: bolt from data - [p=`cafe`a q=[%0 p=*(set beam) q=b]] :: - -In monad-speak, this is the return operator. For us, this just means -that we're producing a `%0` bolt, which contains a path and a set of -dependencies. We assume there are no dependencies for the given data, or -that they will be added later. 
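As a schematic illustration of how the two fit together (this is not code from the vane; `make-a` and `make-b` stand for any two gates that take a cache and produce a `(bolt vase)`), a two-step computation chained with `++cope` and finished with `++fine` has the same skeleton as `++abut` above:

    %+  cope  (make-a cof)
    |=  [cof=cafe vax=vase]
    %+  cope  (make-b cof vax)
    |=  [cof=cafe rez=vase]
    (fine cof rez)

If `(make-a cof)` comes back as a `%1` block or a `%2` error, the rest of the chain never runs and that bolt is produced directly; if both steps succeed, `++cope` merges their dependency sets into the final `%0` bolt.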
- -If there are more hoops in `++body`, we call `++wilt` to process an -individual hoop and recurse. - - ++ wilt :: process body entry - |= [cof=cafe hop=hoop] - ^- (bolt _..wilt) - ?- -.hop - %& (fine cof ..wilt(boy [p.hop boy])) - %| - %+ cool |.(leaf/"ford: wilt {<[(tope p.hop)]>}") - %+ cope (lend cof p.hop) - |= [cof=cafe arc=arch] - ?: (~(has by r.arc) %hoon) - %+ cope (fade cof %hoon p.hop) - |= [cof=cafe hyd=hood] - %+ cope (apex(boy ~) cof hyd) - |= [cof=cafe sel=_..wilt] - (fine cof sel(boy [[%tssg boy.sel] boy])) - =+ [all=(lark (slat %tas) arc) sel=..wilt] - %+ cope - |- ^- (bolt (pair (map term foot) _..wilt)) - ?~ all (fine cof ~ ..wilt) - %+ cope $(all l.all) - |= [cof=cafe lef=(map term foot) sel=_..wilt] - %+ cope ^$(all r.all, cof cof, sel sel) - |= [cof=cafe rig=(map term foot) sel=_..wilt] - %+ cope - %= ^^^^$ - cof cof - ..wilt sel(boy ~) - s.p.hop [p.n.all s.p.hop] - == - |= [cof=cafe sel=_..wilt] - %+ fine cof - [`(map term foot)`[[p.n.all [%ash [%tssg boy.sel]]] lef rig] sel] - |= [cof=cafe mav=(map term foot) sel=_..wilt] - ?~ mav - (flaw cof [%leaf "source missing: {<(tope p.hop)>}"]~) - (fine cof sel(boy [[%brcn mav] boy])) - == - -In the case of a direct twig hoop, we just push it onto `boy` and we're -done. In the case of an indirect hoop, we must compile the referenced -file. - -First, we push onto the stack trace a message indicating which file -exactly we're compiling at the moment with `++cool`. - - ++ cool :: error caption - |* [cyt=trap hoc=(bolt)] - ?. ?=(%2 -.q.hoc) hoc - [p.hoc [%2 *cyt p.q.hoc]] - -If an error occurred in computing `hoc`, we put the bunt of `cyt` onto -the stack trace. Thus, `cyt` is not evaluated at all unless an error -occurred. - -Next in `++wilt`, we load the information about the filesystem node -referenced by the hoop with `++lend`. - - ++ lend :: load arch - |= [cof=cafe bem=beam] - ^- (bolt arch) - =+ von=(ska %cy (tope bem)) - ?~ von [p=cof q=[%1 [bem ~] ~ ~]] - (fine cof ((hard arch) (need u.von))) - -This is a simple call to the namespace. If the resource does not yet -exist, we block on it by producing a `%1` bolt. Otherwise, we cast it to -an arch and produce this. - -Continuing in `++wilt`, we examine the produced arch. If the referenced -filesystem node has a `hoon` child node, then we've found the required -source, so we parse it with `++fade`. Recall that we referred earlier to -`++fade`. The salient point there is that it takes a beam, reads in the -hook file there, and parses it into a hood file with `++fair`. - -Now, we simply recurse on `++apex` to compile the new hood. Note that, -while we do clear the `boy` list, we do not clear the other lists. Thus, -we are accumulating all the structures and libraries referenced in all -the referenced hook files in one group, which we will put at the top of -the product. - -After this, we put the new list of body twigs into a `=~`, push this -onto our old list of body twigs, and produce the result. - -If there is no hoon file here, then we descend into each of our children -until we find a hoon file. First, we produce a list of all our children -whose names are terms with `++lark`. - - ++ lark :: filter arch names - |= [wox=$+(span (unit ,@)) arc=arch] - ^- (map ,@ span) - %- ~(gas by *(map ,@ span)) - =| rac=(list (pair ,@ span)) - |- ^+ rac - ?~ r.arc rac - =. 
rac $(r.arc l.r.arc, rac $(r.arc r.r.arc)) - =+ gib=(wox p.n.r.arc) - ?~(gib rac [[u.gib p.n.r.arc] rac]) - -We traverse the children map of `arc` to filter out those children whose -names aren't accepted by `wox` and produce a map from the product of -`wox` to the original name. `++lark` is used in many cases to parse -names into other types, like numbers or dates, ignoring those which do -not fit the format. In `++wilt`, though, we simply want to filter out -those children whose names are not terms. - -Next, we will produce a map from terms to feet. Each of these feet will -be placed in a core named by the child name, and it will contain arms -according to its children. Thus, if the indirect hoop references -`/path`, then to access the twig defined in `/path/to/twig/hoon`, our -body must refer to `twig:to`. - -If there are no more children, then we are done, so we produce our -current context. - -Else, we recurse into the left and right sides of our map. Finally, we -process our current entry in the map. We first recurse by calling -`++wilt` one level down. Thus, in the previous example, the first time -we get to this point we are processing `/path`, so we recurse on -`++wilt` with path `/path/to`. We also remove our current body from the -recursion, so that we may add it back in later the way we want to. - -After recursing, we push the new body onto our map, keyed by its name. -We also produce the new context so that all external structures, -libraries, and resources are collected into the same place. - -Finally, we have a map of names to feet. If this map is empty, then -there were no twigs at the requested path, so we give an error with -`++flaw`. - -If the map is nonempty, then we finally produce our context with with -one thing pushed onto the front: a core made out of the map we just -produced. - -This concludes our discussion of `++wilt` and `++body`. Thus, it remains -in `++apex` to discuss `++neck` and `++head`. - - ++ neck :: consume libraries - |= [cof=cafe bir=(list hoof)] - ^- (bolt ,_..neck) - ?~ bir (fine cof ..neck) - ?: (~(has in zeg) p.i.bir) - (flaw cof [%leaf "circular library dependency: {}"]~) - =+ gez=(~(put in zeg) p.i.bir) - =+ byf=(~(get by bil) p.i.bir) - ?^ byf - ?. =(`hoof`i.bir `hoof`p.u.byf) - (flaw cof [%leaf "library mismatch: {<~[p.u.byf i.bir]>}"]~) - $(bir t.bir) - =+ bem=(hone %core %lib i.bir) - %+ cope (fade cof %hook bem) - |= [cof=cafe hyd=hood] - %+ cope (apex(zeg gez, hol |, boy ~) cof hyd) - |= [cof=cafe sel=_..neck] - =. ..neck - %= sel - zeg zeg - hol hol - lot [p.i.bir lot] - bil (~(put by bil) p.i.bir [i.bir [%tssg (flop boy.sel)]]) - == - ^^$(cof cof, bir t.bir) - -Here, we're going to consume the list of libraries and place them in -`bil`. If there are no more libraries, we're done, so we just produce -our current context. - -Otherwise, we check to see if the next library in the list is in `zeg`. -If so, then this library is one of the libraries that we're already in -the middle of compiling. There is a circular dependency, so we fail. - -Otherwise, we let `gez` be `zeg` plus the current library so that while -compiling the dependencies of this library we don't later create a -circular dependency. We check next to see if this library is alredy in -`bil`. If so, then we have already included this library earlier, so we -check to see if this is the same version of the library as we included -earlier. If so, we skip it. Else, we fail since we can't include two -different versions of a library. 
We really should allow for newer -versions of a library since in kelvin versioning we assume backwards -compatibility, but for now we require an exact match. - -If we haven't already included this library, then we're going to do -that. First, we get the location of the library with `++hone`. - - ++ hone :: plant hoof - |= [for=@tas way=@tas huf=hoof] - ^- beam - ?~ q.huf - how(s ~[for p.huf way]) - [[q.u.q.huf %main p.u.q.huf] ~[for p.huf way]] - -If we haven't specified the version of the library, we use the current -ship, desk, and case. Otherwise, we use the given ship and case on desk -`%main`. In either case, the path is `/way/p.huf/for`. In the case of -`++neck`, this means `/lib/core/[library name]`. - -In `++neck`, we next compile the hook file at that location with -`++fade`. Again, we will delay the discussion of `++fade`, noting only -that it takes a beam and parses the hook file there into a hood. - -We recurse on this to compile the library. During the compilation, we -let `zeg` be `gez` to avoid circular dependencies, we let `hol` be false -since we don't allow horns in libraries, and we let `boy` be null so -that we can isolate the new body twigs. - -Next, we reintegrate the new data into our context. We use the context -created by the recursion with four changes. First, we reset `zeg` to our -old `zeg`. Second, we reset `hol` to our old `hol`. Third, we put the -name of our library onto the stack of libraries. This means all of a -libraries dependencies will be earlier in `lot` than the library itself, -making `lot` a topological ordering on the dependency graph. Fourth, we -put in `bil` the library hoof and body (with all body twigs collected in -a `=~`), keyed by the library name. - -Finally, we recurse, processing the next library in our list. - -To complete our disucssion of `++apex`, we must process our structures. - - ++ head :: consume structures - |= [cof=cafe bir=(list hoot)] - |- ^- (bolt ,_..head) - ?~ bir - (fine cof ..head) - ?: (~(has in zog) p.q.i.bir) - (flaw cof [%leaf "circular structure dependency: {}"]~) - =+ goz=(~(put in zog) p.q.i.bir) - =+ byf=(~(get by rop) p.q.i.bir) - ?^ byf - ?. =(`hoof`q.i.bir `hoof`p.u.byf) - (flaw cof [%leaf "structure mismatch: {<~[p.u.byf q.i.bir]>}"]~) - $(bir t.bir) - =+ bem=(hone ?:(p.i.bir %gate %core) %sur q.i.bir) - %+ cope (fade cof %hook bem) - |= [cof=cafe hyd=hood] - %+ cope (apex(zog goz, hol |, boy ~) cof hyd) - |= [cof=cafe sel=_..head] - ?. =(bil bil.sel) - (flaw cof [%leaf "structures cannot include libraries: {}"]~) - =. ..head - %= sel - boy ?: p.i.bir - boy - (welp boy [[[%cnzy p.q.i.bir] [%$ 1]] ~]) - zog zog - hol hol - rop %+ ~(put by (~(uni by rop) rop.sel)) - p.q.i.bir - [q.i.bir [%tssg (flop boy.sel)]] - == - ^^$(cof cof, bir t.bir) - -The processing of our structures is very similar to that of our -libraries. For clarity, we'll use many of the same phrases in describing -the parallel natures. First, we check to see if there are more -structures to process. If not, we're done, so we produce our context. - -Otherwise, we let `goz` be `zog` plus the current structure so that -while compiling the dependencies of this structure we don't later create -a circular dependency. We check next to see if this structure is alredy -in `rop`. If so, then we have already included this structure earlier, -so we check to see if this is the same version of the structure as we -included earlier. If so, we skip it. Else, we fail since we can't -include two different versions of a structure. 
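Stripped of the hoon, the `zeg`/`bil`/`lot` bookkeeping is an ordinary cycle-detecting depth-first traversal that emits a topological order. A conceptual sketch in Python (not ford's code; the graph shape and names here are invented for illustration):

    # Conceptual sketch (not ford's code) of the ++neck bookkeeping: `zeg` is
    # the set of libraries currently being compiled (to catch cycles), `bil`
    # caches finished libraries, and `lot` lists them so that every library
    # appears after its dependencies -- a topological order.
    def load(lib, deps_of, zeg=frozenset(), bil=None, lot=None):
        bil = {} if bil is None else bil
        lot = [] if lot is None else lot
        if lib in zeg:
            raise ValueError(f"circular library dependency: {lib}")
        if lib in bil:                      # already compiled; skip it
            return bil, lot
        for dep in deps_of.get(lib, []):    # compile dependencies first
            bil, lot = load(dep, deps_of, zeg | {lib}, bil, lot)
        bil[lib] = f"<compiled {lib}>"
        lot.append(lib)                     # dependencies land earlier in lot
        return bil, lot

    deps = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
    _, order = load('a', deps)
    assert order == ['c', 'b', 'a']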
- -If we haven't loaded this structure, then we call `++hone` to get the -beam where the file structure should be. If the loobean in the hoot is -true, then we're looking for a gate; otherwise, we're looking for a -core. We parse this file with `++fade`. - -Now, we recurse on this to compile the structure. During the recursion, -there we have threee changes. Frist, we let `zog` be `goz` so that we -don't create a circular dependency. Second, we let `hol` be false since -we do not allow horns in structures. Third, we let `boy` be null so that -we can isolate the new body twigs. - -Next, we reintegrate the new data into our context. We use the context -cretaed by the recursion with four changes. First, if we're including a -gate structure, then we reset the body to its original body. Else we put -on the top of our list of body twigs what is essentially a -`=+ structure-name` to take off the face of the structure. Second, we -reset `zog` to our old `zog`. Third, we reset `hol` to our old `hol`. -Finally, we put in `rop` the structure hoof and body (with all body -twiggs collected in a `=~`), keyed by the structure name. - -Finally, we recurse, processing the next structure in our list. - -This concludes our discussion of `++apex`. - - ++ abut :: generate - |= [cof=cafe hyd=hood] - ^- (bolt vase) - %+ cope (apex cof hyd) - |= [cof=cafe sel=_..abut] - =. ..abut sel - %+ cope (maim cof pit able) - |= [cof=cafe bax=vase] - %+ cope (chap cof bax [%fan fan.hyd]) - |= [cof=cafe gox=vase] - %+ cope (maim cof (slop gox bax) [%tssg (flop boy)]) - |= [cof=cafe fin=vase] - (fine cof fin) - -Returning to `++abut`, we have now processed the structures, libraries -and body twigs. Next, we slap our preamble (structures and libraries) -against zuse. First, we construct our preamble in `++able`. - - ++ able :: assemble preamble - ^- twig - :+ %tsgr - ?:(=(~ rop) [%$ 1] [%brcn (~(run by rop) |=([* a=twig] [%ash a]))]) - [%tssg (turn (flop lot) |=(a=term q:(need (~(get by bil) a))))] - -We first put the structures in `rop` into a single `|%` at the top and -`=>` it onto a `=~` of our libraries, in the reverse order that they -appear in `lot`. Thus, the structures are in a single core while the -libraries are in consecutive cores. - -We slap the preamble against zuse with `++maim`. - - ++ maim :: slap - |= [cof=cafe vax=vase gen=twig] - ^- (bolt vase) - %+ (clef %slap) (fine cof vax gen) - |= [cof=cafe vax=vase gen=twig] - =+ puz=(mule |.((~(mint ut p.vax) [%noun gen]))) - ?- -.puz - | (flaw cof p.puz) - & %+ (coup cof) (mock [q.vax q.p.puz] (mole ska)) - |= val=* - `vase`[p.p.puz val] - == - -Here we start to get into ford's caching system. We wrap our computation -in a call to `++clef` so that we only actually compute it if the result -is not already in our cache. First we'll discuss the computation, then -we'll discuss the caching system. - -We call `++mule` with a call to `++mint:ut` on the type of our subject -vase against the given twig. In other words, we're compiling the twig -with against the subject type in the given subject vase. - -If compilation fails, then we produce an error bolt with the produced -stack trace. Otherwise, we run the produced nock with `++mock` and our -sky function. We convert the produced toon to a bolt with `++coup` and -use the type from `puz` combined with the value from `mock` to produce -our vase. - -If this process seems harder than just calling `++slap`, it's because it -is. We have two requirements that `++slap` doesn't satisfy. 
First, we -want to use an explicit sky function for use with `.^`. With -`++slap`, you get whatever sky function is available in the calling -context, which in ford is none. Second, we want to explicitly handle the -stack trace on failure. `++slap` would crash on failure. - -We haven't yet discussed either `++clef` or `++coup`. We'll start with -`++coup` to finish the discussion of the computation. - - ++ coup :: toon to bolt - |= cof=cafe - |* [ton=toon fun=$+(* *)] - :- p=cof - ^= q - ?- -.ton - %2 [%2 p=p.ton] - %0 [%0 p=*(set beam) q=(fun p.ton)] - %1 ~& [%coup-need ((list path) p.ton)] - =- ?- -.faw - & [%1 p=(sa (turn p.faw |=(a=beam [a *(list tank)])))] - | [%2 p=p.faw] - == - ^= faw - |- ^- (each (list beam) (list tank)) - ?~ p.ton [%& ~] - =+ nex=$(p.ton t.p.ton) - =+ pax=(path i.p.ton) - ?~ pax [%| (smyt pax) ?:(?=(& -.nex) ~ p.nex)] - =+ zis=(tome t.pax) - ?~ zis [%| (smyt pax) ?:(?=(& -.nex) ~ p.nex)] - ?- -.nex - & [%& u.zis p.nex] - | nex - == - == - -Recall that a toon is either a `%0` value, a `%1` block, or a `%2` -failure. Converting a `%2` toon failure into a `%2` bolt failure is -trivial. Converting a `%0` toon value into a `%0` bolt value is easy -since we assume there were no dependencies. Converting the blocks is -rather more difficult. - -To compute `faw`, we recurse through the list of paths in the `%1` toon. -At each one, we make sure with `++tome` that it is, in fact, a beam. If -so, then we check to see if the later paths succeed as well. If so, we -append the current path to the list of other paths. If not, we produce -the error message we got from processing the rest of the paths. If this -path is not a beam, then we fail, producing a list of tanks including -this path and, if later paths fail too, those paths as well. - -If some paths were not beams, then we produce a `%2` error bolt. If all -paths were correct, then we produce a `%1` blocking bolt. - -We will now discuss `++clef`. This is where the cache magic happens. - - ++ clef :: cache a result - |* sem=* - |* [hoc=(bolt) fun=(burg)] - ?- -.q.hoc - %2 hoc - %1 hoc - %0 - =^ cux p.hoc ((calk p.hoc) sem q.q.hoc) - ?~ cux - =+ nuf=(cope hoc fun) - ?- -.q.nuf - %2 nuf - %1 nuf - %0 - :- p=(came p.nuf `calx`[sem `calm`[now p.q.nuf] q.q.hoc q.q.nuf]) - q=q.nuf - == - [p=p.hoc q=[%0 p=p.q.hoc q=((calf sem) u.cux)]] - == - -If the value is already an error or a block, we just pass that through. -Otherwise, we look up the request in the cache with `++calk`. - - ++ calk :: cache lookup - |= a=cafe :: - |= [b=@tas c=*] :: - ^- [(unit calx) cafe] :: - =+ d=(~(get by q.a) [b c]) :: - ?~ d [~ a] :: - [d a(p (~(put in p.a) u.d))] :: - -When looking up something in the cache, we mark it if we find it. This -way, we have in our cache the set of all cache entries that have been -referenced. While we do not at present do anything with this data, it -should be used to clear out old and unused entries in the cache. - -Moving on in `++clef`, we check to see if we actually found anything. If -we didn't find a cache entry, then we run the computation in `fun`, and -examine its result. If it produced a `%2` error or `%1` block bolt, we -just pass that through. Otherwise, we produce both the value and an -updated cache with this new entry. We add the entry with `++came`. - - ++ came :: - |= [a=cafe b=calx] :: cache install - ^- cafe :: - a(q (~(put by q.a) [-.b q.b] b)) :: - -We key cache entries by the type of computation (`-:calx`) and the -inputs to the computation (`q:calx`). This just puts the cache line in -the cache at the correct key. 
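Stripped of the vase machinery, the cache discipline is: key each result by (kind of computation, input), record which entries a pass touched, and install a new line on a miss. A rough Python sketch of that idea (not ford's code; the class and method names are made up):

    # Conceptual sketch (not ford's code) of the ++clef / ++calk / ++came
    # cache: results are keyed by (operation, input); lookups mark which
    # entries were touched, so unused entries could later be evicted.
    class Cache:
        def __init__(self):
            self.lines = {}        # (op, input) -> result       (like q.cafe)
            self.touched = set()   # entries referenced this pass (like p.cafe)

        def run(self, op, arg, compute):
            key = (op, arg)
            if key in self.lines:              # ++calk: hit, mark as referenced
                self.touched.add(key)
                return self.lines[key]
            result = compute(arg)              # miss: do the work
            self.lines[key] = result           # ++came: install the cache line
            return result

    cache = Cache()
    assert cache.run('slap', (1, 2), lambda a: sum(a)) == 3   # computed
    assert cache.run('slap', (1, 2), lambda a: 0) == 3        # served from cache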
- -Back in `++clef`, if we did find a cache entry, then we just produce the -value at that cache line. We convert the cache line into a value with -`++calf`. - - ++ calf :: reduce calx - |* sem=* :: a typesystem hack - |= cax=calx - ?+ sem !! - %hood ?>(?=(%hood -.cax) r.cax) - %slap ?>(?=(%slap -.cax) r.cax) - %slam ?>(?=(%slam -.cax) r.cax) - == - -This is simply a typesystem hack. Because the `sem` is passed in through -a wet gate, we know at type time which of the three cases will be -chosen. Thus, the correct type of the value in the cache line gets -passed through to the caller. This also depends on the fact that -`++clef` is wet. The type stuff here is mathematically interesting, but -the action is simple: we get the value from the cache line. - -This concludes our discussion of `++clef` and `++maim`. - -Back in `++abut`, recall that we processed the structures, libraries, -and body with `++apex`. Then, we slapped our preamble (structures and -libraries) against zuse with `++maim`. Next, we process our resources -with `++chap`. Note that we pass in the preamble so that we may refer to -anything in there in our resources. - -`++chap` is broken up into a different case for each horn. We'll go -through them one by one. - - ++ chap :: produce resources - |= [cof=cafe bax=vase hon=horn] - ^- (bolt vase) - ?- -.hon - %ape (maim cof bax p.hon) - -This is `/~`. We slap the twig against our context. - - %arg - %+ cope (maim cof bax p.hon) - |= [cof=cafe gat=vase] - (maul cof gat !>([how arg])) - -This is `/$`. We slap the twig against our context, which we expect to -produce a gate. We slam this gate with a sample of `how` and `arg`, -which is our location and the heel (virtual path extension). - -`++maul` is similar to `++maim`, but it slams instead of slaps. - - ++ maul :: slam - |= [cof=cafe gat=vase sam=vase] - ^- (bolt vase) - %+ (clef %slam) (fine cof gat sam) - |= [cof=cafe gat=vase sam=vase] - =+ top=(mule |.((slit p.gat p.sam))) - ?- -.top - | (flaw cof p.top) - & %+ (coup cof) (mong [q.gat q.sam] (mole ska)) - |= val=* - `vase`[p.top val] - == - -We cache slams exactly as we cache slaps. We use `++slit` to find the -type of the product of the slam given the types of the gate and the -sample. - -If this type fails, we produce the given stack trace as a `%2` error -bolt. Otherwise, we produce the top produced above combined with the -value we get from slamming the values in the vases with `++mong`. - -Back to `++chap`. - - %day (chad cof bax %dr p.hon) - -This is `/|`. We call `++chad` to convert textual names to relative -dates and process the next horn against each of the discovered paths. - - ++ chad :: atomic list - |= [cof=cafe bax=vase doe=term hon=horn] - ^- (bolt vase) - %+ cope ((lash (slat doe)) cof how) - |= [cof=cafe yep=(map ,@ span)] - =+ ^= poy ^- (list (pair ,@ span)) - %+ sort (~(tap by yep) ~) - |=([a=[@ *] b=[@ *]] (lth -.a -.b)) - %+ cope - |- ^- (bolt (list (pair ,@ vase))) - ?~ poy (fine cof ~) - %+ cope $(poy t.poy) - |= [cof=cafe nex=(list (pair ,@ vase))] - %+ cope (chap(s.how [q.i.poy s.how]) cof bax hon) - |= [cof=cafe elt=vase] - (fine cof [[p.i.poy elt] nex]) - |= [cof=cafe yal=(list (pair ,@ vase))] - %+ fine cof - |- ^- vase - ?~ yal [[%cube 0 [%atom %n]] 0] - (slop (slop [[%atom doe] p.i.yal] q.i.yal) $(yal t.yal)) - -First, we call `++lash` to parse the children of the current beam and -pick out those ones that are of the requested format. 
- - ++ lash :: atomic sequence - |= wox=$+(span (unit ,@)) - |= [cof=cafe bem=beam] - ^- (bolt (map ,@ span)) - %+ cope (lend cof bem) - |= [cof=cafe arc=arch] - (fine cof (lark wox arc)) - -First, we get the arch with `++lend`, as described above. We filter and -parse the child names with `++lark` according to the given parser -function. - -In `++chad`, this parser function is `(slat doe)`, which will parse a -cord into an atom of the requested odor. For `%day` the odor is for -relative dates. - -Thus, we now have a map from atoms of the given odor to the actual child -names. We next turn this map into a list and sort it in increasing order -by the atom. - -We next convert this list of pairs of atoms and spans to a list of pairs -of atoms and vases. We process the given horn once at every child beam, -producing the resource at that location. - -Finally, we convert this list of pairs of atoms and vases to a vase of a -list of pairs of atoms to (well-typed) values. Each entry in the list is -of type atom with the given odor combined with the type of the produced -vase. - -Back in `++chap`, we continue parsing resources. - - %dub - %+ cope $(hon q.hon) - |= [cof=cafe vax=vase] - (fine cof [[%face p.hon p.vax] q.vax]) - -This is `/=`. We process the given horn, giving us a vase. We put the given -name as a face on the vase so that it may be referred to later by name. - - %fan - %+ cope - |- ^- (bolt (list vase)) - ?~ p.hon (fine cof ~) - %+ cope ^$(hon i.p.hon) - |= [cof=cafe vax=vase] - %+ cope ^$(cof cof, p.hon t.p.hon) - |= [cof=cafe tev=(list vase)] - (fine cof [vax tev]) - |= [cof=cafe tev=(list vase)] - %+ fine cof - |- ^- vase - ?~ tev [[%cube 0 [%atom %n]] 0] - (slop i.tev $(tev t.tev)) - -This is `/.`. We first process each of the child horns, producing a list -of vases. This is done by just recursing on `++chap`. Then, we simply -fold over this list to create a vase of the list of values. - - %for $(hon q.hon, s.how (weld (flop p.hon) s.how)) - -This is `/,`. We simply recurse on the horn with the given path welded -onto our current beam. - - %hub (chad cof bax %ud p.hon) - -This is `/@`. This is exactly like the processing of `%day` except we -expect the children to be named as unsigned integers rather than -relative dates. We process the horn at each of the children's locations -and produce a list of pairs of unsigned integers and values. - - %man - |- ^- (bolt vase) - ?~ p.hon (fine cof [[%cube 0 [%atom %n]] 0]) - %+ cope $(p.hon l.p.hon) - |= [cof=cafe lef=vase] - %+ cope ^$(cof cof, p.hon r.p.hon) - |= [cof=cafe rig=vase] - %+ cope ^^^$(cof cof, hon q.n.p.hon) - |= [cof=cafe vax=vase] - %+ fine cof - %+ slop - (slop [[%atom %tas] p.n.p.hon] vax) - (slop lef rig) - -This is `/*`. We process each of the horns in the given map by recursion -through `++chap`. Once we have these vases, we create a vase of a map -from the given textual names to the produced values. - - %now (chad cof bax %da p.hon) - -This is `/&`. This is exactly like the processing of `%day` except we -expect the children to be named as absolute dates rather than relative -dates. We process the horn at each of the children's locations and -produce a list of pairs of absolute dates and values. - - %nap (chai cof bax p.hon) - -This is `/%`. Here, we process the horn at each of our children with -`++chai`. 
- - ++ chai :: atomic map - |= [cof=cafe bax=vase hon=horn] - ^- (bolt vase) - %+ cope (lend cof how) - |= [cof=cafe arc=arch] - %+ cope - |- ^- (bolt (map ,@ vase)) - ?~ r.arc (fine cof ~) - %+ cope $(r.arc l.r.arc) - |= [cof=cafe lef=(map ,@ vase)] - %+ cope `(bolt (map ,@ vase))`^$(cof cof, r.arc r.r.arc) - |= [cof=cafe rig=(map ,@ vase)] - %+ cope (chap(s.how [p.n.r.arc s.how]) cof bax hon) - |= [cof=cafe nod=vase] - (fine cof [[p.n.r.arc nod] lef rig]) - |= [cof=cafe doy=(map ,@ vase)] - %+ fine cof - |- ^- vase - ?~ doy [[%cube 0 [%atom %n]] 0] - %+ slop - (slop [[%atom %a] p.n.doy] q.n.doy) - (slop $(doy l.doy) $(doy r.doy)) - -We get the arch at our current beam with `++lend`. Then, we process the -horn at each of our children to give us a map of atoms to vases. -Finally, we convert that into a vase of a map of these atoms to the -values. This is very similar to `++chad` and the handling of `%man`. - - %see $(hon q.hon, how p.hon) - -This is `/:`. We process the given horn at the given beam. - - %saw - %+ cope $(hon q.hon) - |= [cof=cafe sam=vase] - %+ cope (maim cof bax p.hon) - |= [cof=cafe gat=vase] - (maul cof gat sam) - -This is `/;`. First, we process the given horn. Then, we slap the given -twig against our context to produce (hopefully) a gate. Finally, we slam -the vase we got from processing the horn against the gate. - - %sic - %+ cope $(hon q.hon) - |= [cof=cafe vax=vase] - %+ cope (maim cof bax [%bctr p.hon]) - |= [cof=cafe tug=vase] - ?. (~(nest ut p.tug) | p.vax) - (flaw cof [%leaf "type error: {} {}"]~) - (fine cof [p.tug q.vax]) - -This is `/^`. First, we process the given horn. Then, we slap the the -bunt of the given tile against our context. This will produce a vase -with the correct type. We test to see if this type nests within the type -of the vase we got from processing the horn. If so, we produce the value -from the horn along with the type from the tile. Otherwise, we produce a -`%2` error bolt. - - %toy (cope (make cof %bake p.hon how ~) feel) - == - -This is `/mark/`. Here, we simply run the `%bake` silk on the given -mark, producing a cage. We convert this cage into a vase with `++feel`, -which is exactly as simple as it sounds like it should be. - - ++ feel |=([a=cafe b=cage] (fine a q.b)) :: cage to vase - -This is trivial. - -We will discuss later `++make` and how `%bake` is processed. Suffice it -to say that baking a resource with a given mark gets the resource and -converts it, if necessary, to the requested mark. - -This concludes our discussion of `++chap`. - -We return once more to `++abut`. - - ++ abut :: generate - |= [cof=cafe hyd=hood] - ^- (bolt vase) - %+ cope (apex cof hyd) - |= [cof=cafe sel=_..abut] - =. ..abut sel - %+ cope (maim cof pit able) - |= [cof=cafe bax=vase] - %+ cope (chap cof bax [%fan fan.hyd]) - |= [cof=cafe gox=vase] - %+ cope (maim cof (slop gox bax) [%tssg (flop boy)]) - |= [cof=cafe fin=vase] - (fine cof fin) - -Recall that we processed our structures, libraries and body with -`++apex`. We slapped our structures and libraries against zuse with -`++maim`. We processed our resources with `++chap`. Now, all our body -twigs are collected in a `=~` and slapped against our structures, -libraries, and resources. This produces our final result. - -The hook file has been assembled. And there was great rejoicing. - -Lifecycle of a Kiss -------------------- - -We're now going to go through a series of lifecycle descriptions. When a -user of ford sends a kiss, it is one of a dozen different types of silk. 
-We'll go through each one, tracing through the flow of control of each -of these. - -First, though, we'll describe the common handling to all kisses. - -The silk in a `%exec` kiss to ford ends up in `++apex`, so we'll enter -the narrative here. - - ++ apex :: call - |= kus=(unit silk) - ^+ +> - ?~ kus - =+ nym=(~(get by dym.bay) hen) - ?~ nym :: XX should never - ~& [%ford-mystery hen] - +>.$ - =+ tas=(need (~(get by q.tad.bay) u.nym)) - amok:~(camo zo [u.nym tas]) - =+ num=p.tad.bay - ?< (~(has by dym.bay) hen) - =: p.tad.bay +(p.tad.bay) - dym.bay (~(put by dym.bay) hen num) - == - ~(exec zo [num `task`[hen u.kus 0 ~]]) - -Recall that a `%exec` kiss actually sends a unit silk. If it's null, -we're trying to cancel the request. We first look up the task number -keyed by duct. If we don't find it, then we're trying to cancel a -request that either was never started or has already completed. We print -out `%ford-mystery` and do nothing. If we do find the task number, then -we look up the task from it, call `++camo:zo` to cancel pending -requests, and call `++amok:zo` to remove the task from our task lists. - - ++ camo :: stop requests - ^+ . - =+ kiz=(~(tap by q.kig) *(list ,[p=@ud q=beam])) - |- ^+ +> - ?~ kiz +> - %= $ - kiz t.kiz - mow :_ mow - :- hen - :^ %pass [(scot %p our) (scot %ud num) (scot %ud p.i.kiz) ~] - %c - [%warp [our p.q.i.kiz] q.q.i.kiz ~] - == - -Our list of blocks is in `q.kig`, so we iterate over it, cancelling our -pending requests for each block. Our requests are all to clay, so we -need only to send `%warp` kisses with a null instead of a rave. - - ++ amok - %_ ..zo - q.tad.bay (~(del by q.tad.bay) num) - dym.bay (~(del by dym.bay) nah) - == - -We remove the task number from the map of numbers to tasks and the duct -from the map of ducts to task numbers. - -Back in `++apex`, if we were given a silk, we need to process it. We add -the task to our maps, increment the next task number, and call -`++exec:zo` on the new task. - - ++ exec :: execute app - ^+ ..zo - ?: !=(~ q.kig) ..zo - |- ^+ ..zo - =+ bot=(make [~ jav.bay] kas) - =. ..exec (dash p.bot) - ?- -.q.bot - %0 amok:(expo [%made %& p.q.bot q.q.bot]) - %2 amok:(expo [%made %| p.q.bot]) - %1 =+ zuk=(~(tap by p.q.bot) ~) - =< abet - |- ^+ ..exec - ?~ zuk ..exec - =+ foo=`_..exec`(camp %x `beam`p.i.zuk) - $(zuk t.zuk, ..exec foo) - == - -If we're still blocked on something in `q.kig`, we don't do anything. - -Otherwise, we try to process the silk with `++make`. `++make` handles -each individual request and will be the entire focus of the remainder of -this doc after this section. It produces a bolt of a cage. - -We put the new cache in our state with `++dash`. - - ++ dash :: process cache - |= cof=cafe - ^+ +> - %_(+> jav.bay q.cof) - -The cache is put in the baby so that it gets stored across calls to -ford. - -In `++exec`, we process the bolt in three different ways according to -the type of bolt produced. If we produced a `%0` value bolt, we use -`++expo` to give the produced value and set of dependencies as a `%made` -gift, and we remove ourselves from the task list with `++amok`. - - ++ expo :: return gift - |= gef=gift - %_(+> mow :_(mow [hen %give gef])) - -We simply push the gift onto our list of moves. - -In `++exec`, if we produced a `%2` error bolt, we produce a `%made` gift -with the stack trace. - -If we produced a `%1` block bolt, we iterate through each of the blocks -and call `++camp` to produce a clay request for the resource. 
- - ++ camp :: request a file - |= [ren=care bem=beam] - ^+ +> - %= +> - kig [+(p.kig) (~(put by q.kig) p.kig bem)] - mow :_ mow - :- hen - :^ %pass [(scot %p our) (scot %ud num) (scot %ud p.kig) ~] - %c - [%warp [our p.bem] q.bem [~ %& %x r.bem s.bem]] - == - -We put the resource in our block list in `q.kig` so that we save the -fact that we're blocked. We then produce the `%warp` request to clay for -the resource. Our request path has the format -\`/[our-ship]/[task-number]/[block-number]'. - -We'll now describe how each of the individual silks are processed in -`++make`. - -Lifecycle of a Cell -------------------- - - ^ - %. [cof p.kas q.kas] - ;~ cope - ;~ coax - |=([cof=cafe p=silk q=silk] ^$(cof cof, kas p.kas)) - |=([cof=cafe p=silk q=silk] ^$(cof cof, kas q.kas)) - == - :: - |= [cof=cafe bor=cage heg=cage] ^- (bolt cage) - [p=cof q=[%0 ~ [%$ (slop q.bor q.heg)]]] - == - -Silks autocons. The product of a cell of silks is a cell of the products -of the silks, so we evaluate the two silks in parallel with `++coax` and -slop together the results in a cell vase. We mark the product with `%$`, -which means we know no more mark information than that it is a noun. - - ++ coax :: bolt across - |* [hoc=(bolt) fun=(burg)] - ?- -.q.hoc - %0 =+ nuf=$:fun(..+<- p.hoc) - :- p=p.nuf - ^= q - ?- -.q.nuf - %0 [%0 p=(grom p.q.hoc p.q.nuf) q=[q.q.hoc q.q.nuf]] - %1 q.nuf - %2 q.nuf - == - %1 =+ nuf=$:fun(..+<- p.hoc) - :- p=p.nuf - ^= q - ?- -.q.nuf - %0 q.hoc - %1 [%1 p=(grom p.q.nuf p.q.hoc)] - %2 q.nuf - == - %2 hoc - == - -If the first bolt is a value, we evaluate the burg to get the next bolt. -If that also produces a value, we merge the dependency sets and produce -a cell of the two values. Otherwise, we produce the block or error of -the second bolt. - -If the first bolt is a block, we evaluate the burg to get the next bolt. -If that produces a value, we just produce the block. If it produces a -block, we merge the two block sets. If it produces an error, we produce -that error. - -If the first bolt is already an error, we just pass that through. - -Note that `++coax` (and, indeed, `++cope`) is reasonable to use with -`;~`. - -Lifecycle of a `%bake` ----------------------- - - %bake - %+ cool |.(leaf/"ford: bake {} {<(tope q.kas)>}") - %+ cope (lima cof p.kas q.kas r.kas) - |= [cof=cafe vux=(unit vase)] - ?~ vux - (flaw cof (smyt (tope q.kas)) ~) - (fine cof [p.kas u.vux]) - -This is one of the most critical silks. We are going to functionally -produce the hook file at the given beam with the given heel. The result -will be of the correct mark, even if we need to run conversion -functions. The functionality is encapsulated in `++lime`. If it produces -null, then we produce an error. Otherwise, we take the vase produced and -give it the correct mark. - - ++ lima :: load at depth - |= [cof=cafe for=mark bem=beam arg=heel] - ^- (bolt (unit vase)) - %+ cope (lend cof bem) - |= [cof=cafe arc=arch] - ^- (bolt (unit vase)) - ?: (~(has by r.arc) for) - (lace cof for bem(s [for s.bem]) arg) - =+ haz=(turn (~(tap by r.arc) ~) |=([a=@tas b=~] a)) - ?~ haz (fine cof ~) - %+ cope (lion cof for -.bem haz) - |= [cof=cafe wuy=(unit (list ,@tas))] - ?~ wuy (fine cof ~) - ?> ?=(^ u.wuy) - %+ cope (make cof %bake i.u.wuy bem arg) - |= [cof=cafe hoc=cage] - %+ cope (lope cof i.u.wuy t.u.wuy -.bem q.hoc) - |= [cof=cafe vax=vase] - (fine cof ~ vax) - -First, we load the arch at the given beam with `++lend`. If we have a -child named the mark, our job is straightforward, so we go ahead and -load that with `++lace`. 
- -Otherwise, we iterate through our children. If we have no children, we -produce null, signifying that we didn't find any way to convert to the -requested mark. Otherwise, we call `++lion` to find a translation path -from one of the available marks into the target mark. We recursively -bake the child that has a path to the target mark, and then we call -`++lope` to translate this mark into the target mark. - -We'll first discuss the direct case of when one of our children is of -the correct mark. - - ++ lace :: load and check - |= [cof=cafe for=mark bem=beam arg=heel] - ^- (bolt (unit vase)) - =+ bek=`beak`[p.bem q.bem r.bem] - %+ cope (lend cof bem) - |= [cof=cafe arc=arch] - ?^ q.arc - (cope (cope (liar cof bem) (lake for bek)) fest) - ?: (~(has by r.arc) %hook) - %+ cope (fade cof %hook bem) - |= [cof=cafe hyd=hood] - (cope (cope (abut:(meow bem arg) cof hyd) (lake for bek)) fest) - (fine cof ~) - -First, we get the arch at the given beam with `++lend`. If this is a -file, we load the file with `++liar` and coerce the type with `++lake`. -Otherwise, we check to see if we have a hook file here. If so, we parse -it with `++fade`, compile it with `++abut:meow`, and coerce the type -with `++lake`. - -Otherwise, there is no way to translate this, so we produce null. - -`++fest` is one line, so we'll get that one out of the way first. - - ++ fest |*([a=cafe b=*] (fine a [~ u=b])) :: bolt to unit - -This is just `++some` for bolts. - -We've delayed the discussion of `++fade` far too many times. It's not -complicated, we just wanted to spare a premature discussion of `++make` -and the `%bake` silk. We 're now able to discuss everything in `++fade` -with ease. - - ++ fade :: compile to hood - |= [cof=cafe for=mark bem=beam] - ^- (bolt hood) - %+ cool |.(leaf/"ford: fade {<[(tope bem)]>}") - %+ cope (make cof [%bake for bem ~]) - |= [cof=cafe cay=cage] - %+ (clef %hood) (fine cof bem cay) - ^- (burg (pair beam cage) hood) - |= [cof=cafe bum=beam cay=cage] - =+ rul=(fair bem) - ?. ?=(@ q.q.cay) - (flaw cof ~) - =+ vex=((full rul) [[1 1] (trip q.q.cay)]) - ?~ q.vex - (flaw cof [%leaf "syntax error: {} {}"] ~) - (fine cof p.u.q.vex) - -We first push a line onto a stack trace to say that we're parsing into a -hood file. - -We bake the given beam with the given mark and no heel. Recall that -baking gate, core, door, hoon, and hook files produces simply an atom of -the text. We check to make sure that our value is an atom, failing -otherwise. - -The parsing step is run within `++clef` so that the result is cached. We -call `++fair` with the current beam to generate the parsing rule, and we -parse the file. If parsing fails, we fail giving a syntax error with the -line and column number. Otherwise, we produce the value. - - ++ liar :: load vase - |= [cof=cafe bem=beam] - ^- (bolt vase) - =+ von=(ska %cx (tope bem)) - ?~ von - [p=*cafe q=[%1 [[bem ~] ~ ~]]] - ?~ u.von - (flaw cof (smyt (tope bem)) ~) - (fine cof ?^(u.u.von [%cell %noun %noun] [%atom %$]) u.u.von) - -This takes a beam and loads the file at that location. If our sky -function produces null, that means the resource is currently -unavailable, so we block on it. If it produces `[~ ~]`, that means our -resource is permanently unavailable, so we produce an error. Otherwise, -we produce the value there with a type of either a cell of two nouns or -an atom, depending on whether the value is a cell or not. - -Back in `++lima`, recall that we call `++lion` to find a translation -path. 
- - ++ lion :: translation search - |= [cof=cafe too=@tas bek=beak fro=(list ,@tas)] - ^- (bolt (unit (list ,@tas))) - =| war=(set ,@tas) - =< -:(apex (fine cof fro)) - |% - ++ apex - |= rof=(bolt (list ,@tas)) - ^- [(bolt (unit (list ,@tas))) _+>] - ?. ?=(%0 -.q.rof) [rof +>.$] - ?~ q.q.rof - [[p.rof [%0 p.q.rof ~]] +>.$] - =^ orf +>.$ (apse cof i.q.q.rof) - ?. ?=(%0 -.q.orf) - [orf +>.$] - ?~ q.q.orf - $(cof p.orf, q.q.rof t.q.q.rof) - [[p.orf [%0 (grom p.q.rof p.q.orf) q.q.orf]] +>.$] - :: - ++ apse - |= [cof=cafe for=@tas] - ^- [(bolt (unit (list ,@tas))) _+>] - ?: =(for too) - [(fine cof [~ too ~]) +>.$] - ?: (~(has in war) for) [(fine cof ~) +>] - =. war (~(put in war) for) - =^ hoc +>.$ (apex (lily cof for bek)) - :_ +>.$ - %+ cope hoc - |= [cof=cafe ked=(unit (list ,@tas))] - (fine cof ?~(ked ~ [~ for u.ked])) - -- - -At a high level, we have `++apex` and `++apse`. `++apex` takes a list of -marks to try in succession until we find one that can be translated into -the target mark. On each one, it calls `++apse`, which takes a single -mark and tries to find a translation path from this mark to the target. -To do this, it sees which marks we know how to directly translate to, -and calls `++apex` on this list. The result of this mututal recursion is -a depth-first search of the translation graph to find the target mark. -Since the translation graph is not necessarily acyclic, we maintain a -set of marks that we've already tried. - -We kick off our search in `++apex`, starting with the given initial list -of marks that we know how to get to. - -If `++apex` is called with a bolt other than a `%0` value bolt, we -simply produce it. Otherwise, we check to see if the list of available -marks to investigate is null. If so, then we're done, so we produce a -`%0` bolt with a null list of accessible marks. - -Otherwise, we process this next mark with `++apse`, which will produce a -possible list of marks from this one to the target mark. If it fails to -produce a `%0` bolt, we just produce that. Otherwise, if it produces -null, we can't get to our target through this mark, so we move on to the -next one. - -If it doesn't produce null, then we have successfully found a -translation path, so we produce it. - -In `++apse`, we first test to see if we've arrived at the target path. -If so, we're done, so we produce a list including just ourself. -Otherwise, we check to see if we've already tried this mark. If so, we -know we can't succeed here, so we produce null. Otherwise, we put -ourselves in the set of already-tried marks, and we move on. - -We call `++lily` to get the list of marks we can translate this one -into. - - ++ lily :: translation targets - |= [cof=cafe for=mark bek=beak] - ^- (bolt (list ,@tas)) - =+ raf=(fang cof for bek) - ?: =(%2 -.q.raf) (fine p.raf ~) - %+ cope raf - |= [cof=cafe vax=vase] - %+ fine cof - %+ weld - ^- (list ,@tas) - ?. (slab %garb p.vax) ~ - =+ gav=((soft (list ,@tas)) q:(slap vax [%cnzy %garb])) - ?~(gav ~ u.gav) - ?. (slab %grow p.vax) ~ - =+ gow=(slap vax [%cnzy %grow]) - (sloe p.gow) - -We call `++fang` to get the mark definition door. This is documented -under `%vale`. If getting the mark fails, we produce null because we -can't translate a non-existent mark into anything. - -Otherwise, we examine the door. The door may have a `++garb`, which is -simply a list of marks which know how to translate from the current one. -There must be a corresponding `++grab` in the definition of the other -mark, though we don't check that here. 
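Both `++garb` and `++grow` (discussed next) contribute edges to this translation graph, and the `++apex`/`++apse` pair above is just a depth-first path search over those edges with a visited set. A conceptual sketch in Python (not ford's code; the example marks and the `edges` table are invented):

    # Conceptual sketch (not ford's code) of the ++lion search: a depth-first
    # walk over the mark-translation graph, with a visited set (`war`) because
    # the graph may contain cycles. `edges` plays the role of ++lily: the marks
    # a given mark can be translated into directly.
    def find_path(frm, too, edges, seen=None):
        seen = set() if seen is None else seen
        if frm == too:
            return [too]
        if frm in seen:
            return None
        seen.add(frm)
        for nxt in edges.get(frm, []):
            rest = find_path(nxt, too, edges, seen)
            if rest is not None:
                return [frm] + rest
        return None

    edges = {'md': ['txt'], 'txt': ['mime'], 'mime': []}
    assert find_path('md', 'mime', edges) == ['md', 'txt', 'mime']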
- -The door may also have a `++grow`, which defines how to translate this -mark into another one. Each arm in `++grow` is the name of a mark we can -translate into. The call to `++sloe` simply produces a list of arm names -in `++grow`. - -Back in `++apse:lion`, we take the list of translation targets we just -found and call `++apex` on it. If we got back a null, we produce a null; -otherwise, we produce the list of marks we got back plus the current -mark. - -This concludes our discussion of `++lion`. - -The final piece of `++lima` is `++lope`, which performs the actual -translation along the path we just computed. - - ++ lope :: translation pipe - |= [cof=cafe for=mark yaw=(list mark) bek=beak vax=vase] - ^- (bolt vase) - ?~ yaw (fine cof vax) - %+ cope (link cof i.yaw for bek vax) - |= [cof=cafe yed=vase] - ^$(cof cof, for i.yaw, yaw t.yaw, vax yed) - -We iterate through our list, calling `++link` on every adjacent pair of -marks, translating from one mark to the next until we finish the list of -marks. A call to `++link` is equivalent to a `%cast` silk, so we -document it there. After we've called performed every step in the -translation pipeline, we're done. - -Lifecycle of a `%boil` ----------------------- - - %boil - %+ cool |.(leaf/"ford: boil {} {<(tope q.kas)>} {}") - %+ cope (lamp cof q.kas) - |= [cof=cafe bem=beam] - %+ cope (lime cof p.kas bem r.kas) - |= [cof=cafe vax=vase] - (fine cof `cage`[p.kas vax]) - -At a high level, we try to bake at the given beam, and if it fails, we -go up a level and try again. This is the usual semantics of ford, and -this should nearly always be preferred over directly baking. - -First, we normalize the version case to a number with `++lamp`. This -allows caching to be based on revision number rather than something more -ephemeral like a particular time. - - ++ lamp :: normalize version - |= [cof=cafe bem=beam] - ^- (bolt beam) - =+ von=(ska %cw (tope bem(s ~))) - ?~ von [p=cof q=[%1 [bem ~] ~ ~]] - (fine cof bem(r [%ud ((hard ,@) (need u.von))])) - -We call the sky function with `%cw`, asking clay for the revision number -at this case. If the case refers to a revision that isn't there yet, we -produce a `%1` blocking bolt. Otherwise, we require that the value exist -and that it's a number, both of which are guaranteed by clay. We produce -this number. - -Next for `%boil` we call `++lime` to try to load the beam. - - ++ lime :: load beam - |= [cof=cafe for=mark bem=beam arg=heel] - =+ [mob=bem mer=(flop arg)] - |- ^- (bolt vase) - %+ cope (lima cof for mob (flop mer)) - |= [cof=cafe vux=(unit vase)] - ?^ vux (fine cof u.vux) - ?~ s.mob - (flaw cof (smyt (tope bem)) ~) - ^$(s.mob t.s.mob, mer [i.s.mob mer]) - -We start at the given beam and try to bake it. If it succeeds, we're -good. Otherwise, we pop off the top level of the path and put it in our -heel (virtual path extension). We do this recursively until either we -find something we can bake or we've gone all the way up to the root path -of the desk, in which case we fail. - -Lifecycle of a `%call` ----------------------- - - %call - %+ cool |.(leaf/"ford: call {<`@p`(mug kas)>}") - %. [cof p.kas q.kas] - ;~ cope - ;~ coax - |=([cof=cafe p=silk q=silk] ^$(cof cof, kas p)) - |=([cof=cafe p=silk q=silk] ^$(cof cof, kas q)) - == - :: - |= [cof=cafe gat=cage sam=cage] - (maul cof q.gat q.sam) - :: - |= [cof=cafe vax=vase] - (fine cof %noun vax) - == - -This is slam for silks. We process both of the given silks in parallel -with `++coax`. 
We then slam the two produced vases together with -`++maul` and mark the produced vase with `%noun` since we don't know any -more specific mark. - -`++coax` is documented under Lifecycle of a Cell. - -Lifecycle of a `%cast` ----------------------- - - %cast - %+ cool |.(leaf/"ford: cast {}") - %+ cope $(kas q.kas) - |= [cof=cafe cay=cage] - %+ cope (link cof p.kas p.cay [our %main %da now] q.cay) - |= [cof=cafe vax=vase] - (fine cof [p.kas vax]) - -This is a request to convert data of one mark to another mark directly. -We evaluate the given silk and pass the result into `++link`, which -performs the actual translation. Note that this will not search for -indirect conversion paths, so the conversion must be defined either in -the `++grow` of the given mark or the `++grab` of the target mark. - - ++ link :: translate - |= [cof=cafe too=mark for=mark bek=beak vax=vase] - ^- (bolt vase) - ?: =(too for) (fine cof vax) - ?: |(=(%noun for) =(%$ for)) - ((lake too bek) cof vax) - %+ cope (fang cof for bek) - |= [cof=cafe pro=vase] - ?: &((slab %grow p.pro) (slab too p:(slap pro [%cnzy %grow]))) - %+ cope (keel cof pro [[%& 6]~ vax]~) - |= [cof=cafe pox=vase] - (maim cof pox [%tsgr [%cnzy %grow] [%cnzy too]]) - %+ cope (fang cof too bek) - |= [cof=cafe pro=vase] - =+ ^= zat ^- (unit vase) - ?. (slab %grab p.pro) ~ - =+ gab=(slap pro [%cnzy %grab]) - ?. (slab for p.gab) ~ - `(slap gab [%cnzy for]) - ?~ zat - (flaw cof [%leaf "ford: no link: {<[for too]>}"]~) - (maul cof u.zat vax) - -This performs one step in the translation pipeline. If the given and -target marks are the same, we're done. If we're translating from a noun -or the empty mark, we coerce with `++lake` (documented in `%vale`). -Otherwise, we're translating from a user-defined mark. - -We load the definition of the given mark with `++fang`, and we check to -see if it has an arm in `++grow` named the target mark. If so, we place -our data in the sample of the door with `++keel` and slap the arm. -`++keel` is equivalent to a `%mute` silk, so we document it there. - -If there is no arm in `++grow` of the given mark named the target mark, -we suppose there must be an arm in `++grab` of the target mark named the -given mark. We get the definition of the target mark and check to see if -it has the required arm, failing if it doesn't. Finally, we slam the -data against the correct arm, producing the translated data. - -If you're confused as to why the handling of `++grow` and `++grab` look -superficially so different, remember that the correct arm in `++grow` -does not have a sample while the one in `++grab` does. This means they -must be called rather differently. - -Lifecycle of a `%diff` ----------------------- - - %diff - %+ cool |.(leaf/"ford: diff {<`@p`(mug p.kas)>} {<`@p`(mug q.kas)>}") - (diff cof p.kas q.kas) - -We push debug information onto the trace and go right to `++diff`. - - ++ diff - |= [cof=cafe kas=silk kos=silk] - ^- (bolt cage) - %. [cof kas kos] - ;~ cope - ;~ coax - |=([cof=cafe p=silk q=silk] (make cof p)) - |=([cof=cafe p=silk q=silk] (make cof q)) - == - |= [cof=cafe cay=cage coy=cage] - -First, we process the two given silks to get our arguments. - - ?. =(p.cay p.coy) - %+ flaw cof :_ ~ - leaf/"diff on data of different marks: {(trip p.cay)} {(trip p.coy)}" - -If the two cages have different marks, then we can't diff them, so we -complain. - - %+ cope (fang cof p.cay [our %main %da now]) - |= [cof=cafe pro=vase] - -We pull in the relevant mark's definition. - - ?. 
(slab %grad p.pro) - (flaw cof leaf/"no ++grad" ~) - =+ gar=(slap pro [%cnzy %grad]) - ?. (slab %form p.gar) - ?. (slab %sted p.gar) - (flaw cof leaf/"no ++form:grad nor ++sted:grad" ~) - =+ for=((soft ,@tas) q:(slap gar [%cnzy %sted])) - ?~ for - (flaw cof leaf/"bad ++sted:grad" ~) - (make cof %diff [%cast u.for kas] [%cast u.for kos]) - -If there's no `++grad`, we complain. If there's no `++form:grad`, then -we look for a `++sted:grad`. If we can't find either, or if -`++sted:grad` isn't a term, then we complain. If `++sted:grad` exists -and is a term, then it represents the mark we should use as a proxy to -get our diff. So, we cast both our given cages to the new mark and start -the dance again. - - ?. (slab %diff p.gar) - (flaw cof leaf/"no ++diff:grad" ~) - -Otherwise, we expect a `++diff:grad`. - - %+ cope (keel cof pro [[%& 6]~ q.cay]~) - |= [cof=cafe pox=vase] - -We put the first cage's data into the sample of the given mark's -definition. - - %+ cope - %^ maul cof - (slap (slap pox [%cnzy %grad]) [%cnzy %diff]) - q.coy - |= [cof=cafe dif=vase] - -We run `++diff:grad` with a sample of the second cage's data. - - =+ for=((soft ,@tas) q:(slap gar [%cnzy %form])) - ?~ for - (flaw cof leaf/"bad ++form:grad" ~) - (fine cof u.for dif) - == - -We check that `++form:grad` exists, and we tag the result with it to -give the final cage. - -Lifecycle of a `%done` ----------------------- - - %done [cof %0 p.kas q.kas] - -This is trivial. We simply produce the given cage with the given set of -dependencies. This is used when we already have a cage that we want to -insert into another silk that requires a silk argument. It's analogous -to the return operator in a monad -- which makes it sound way more -complicated than it is. - -Lifecycle of a `%dude` ----------------------- - - %dude (cool |.(p.kas) $(kas q.kas)) - -This simply puts a given tank on the stack trace if the given silk -produces an error. This is implemented as a simple call to `++cool`. - -Lifecycle of a `%dune` ----------------------- - - %dune - ?~ q.kas [cof [%2 [%leaf "no data"]~]] - $(kas [%done p.kas u.q.kas]) - -This is a sort of a `++need` for silks. If there is no data in the unit -cage, we produce an error. Else, we simply produce the data in the cage. - -Lifcycle of a `%mute` ---------------------- - - %mute (kale cof p.kas q.kas) - -This mutates a silk by putting the values of other silks at particular -axes. This is useful in, for example, replacing the sample of the door -in a mark definition. - - ++ kale :: mutate - |= [cof=cafe kas=silk muy=(list (pair wing silk))] - ^- (bolt cage) - %+ cope - |- ^- (bolt (list (pair wing vase))) - ?~ muy (fine cof ~) - %+ cope (make cof q.i.muy) - |= [cof=cafe cay=cage] - %+ cope ^$(muy t.muy) - |= [cof=cafe rex=(list (pair wing vase))] - (fine cof [[p.i.muy q.cay] rex]) - |= [cof=cafe yom=(list (pair wing vase))] - %+ cope (make cof kas) - |= [cof=cafe cay=cage] - %+ cope (keel cof q.cay yom) - |= [cof=cafe vax=vase] - (fine cof p.cay vax) - -First, we process each of the silks by calling `++make` on them. We pass -the resultant vase and list of pairs of wings and silks to `++keel` to -do the actual mutation. We assume the mutation doesn't change the mark -of the main silk, so we mark the produced vase with the original mark. 
- - ++ keel :: apply mutations - |= [cof=cafe suh=vase yom=(list (pair wing vase))] - ^- (bolt vase) - %^ maim cof - %+ slop suh - |- ^- vase - ?~ yom [[%atom %n] ~] - (slop q.i.yom $(yom t.yom)) - ^- twig - :+ %cncb [%& 2]~ - =+ axe=3 - |- ^- (list (pair wing twig)) - ?~ yom ~ - :- [p.i.yom [%$ (peg axe 2)]] - $(yom t.yom, axe (peg axe 3)) - -We first put the vases together in one big tuple starting with the -subject and going through the mutations. We slap against this tuple a -`%_` twig we directly construct. Since a `%_` twig takes a list of pairs -of wings and twigs, we simply have to generate twigs referring to the -correct axes in the subject. This is very easy since we just recur on -axis 3 of whatever axis we were already at. - -Note the use of `%_` instead of `%=` enforces that our mutations don't -change the type of the subject, which justifies our use of the original -mark. - -Lifecycle of a `%pact` ----------------------- - - %pact - %+ cool |.(leaf/"ford: pact {<`@p`(mug p.kas)>} {<`@p`(mug q.kas)>}") - (pact cof p.kas q.kas) - -We push debug information onto the trace and go right to `++pact`. - - ++ pact :: patch - |= [cof=cafe kas=silk kos=silk] - ^- (bolt cage) - %. [cof kas kos] - ;~ cope - ;~ coax - |=([cof=cafe p=silk q=silk] (make cof p)) - |=([cof=cafe p=silk q=silk] (make cof q)) - == - |= [cof=cafe cay=cage coy=cage] - -First, we process the two given silks to get our arguments. - - %+ cope (fang cof p.cay [our %main %da now]) - |= [cof=cafe pro=vase] - -We pull in the relevant mark's definition. - - ?. (slab %grad p.pro) - (flaw cof leaf/"no ++grad" ~) - =+ gar=(slap pro [%cnzy %grad]) - ?. (slab %form p.gar) - ?. (slab %sted p.gar) - (flaw cof leaf/"no ++form:grad nor ++sted:grad" ~) - =+ for=((soft ,@tas) q:(slap gar [%cnzy %sted])) - ?~ for - (flaw cof leaf/"bad ++sted:grad" ~) - (make cof %cast p.cay %pact [%cast u.for kas] kos) - -If there's no `++grad`, we complain. If there's no `++form:grad`, then -we look for a `++sted:grad`. If we can't find either, or if -`++sted:grad` isn't a term, then we complain. If `++sted:grad` exists -and is a term, then it represents the mark we should use as a proxy to -get our diff. So, we cast the first argument to the new mark, then try -to patch. Afterward, we cast the result back to the original mark. - - =+ for=((soft ,@tas) q:(slap gar [%cnzy %form])) - ?~ for - (flaw cof leaf/"bad ++form:grad" ~) - ?. =(u.for p.coy) - %+ flaw cof :_ ~ - =< leaf/"pact on data with wrong form: {-} {+<} {+>}" - [(trip p.cay) (trip u.for) (trip p.coy)] - -If `++form:grad` isn't a term, or else our second argument isn't of that -mark, we complain. - - ?. (slab %pact p.gar) - (flaw cof leaf/"no ++pact:grad" ~) - -If we don't have a `++pact:grad`, we complain. - - %+ cope (keel cof pro [[%& 6]~ q.cay]~) - |= [cof=cafe pox=vase] - -We put the first cage's data into the sample of the given mark's -definition. - - %+ cope - %^ maul cof - (slap (slap pox [%cnzy %grad]) [%cnzy %pact]) - q.coy - |= [cof=cafe pat=vase] - -We run `++pact:grad` with a sample of the second cage's data, which is -the diff. - - (fine cof p.cay pat) - == - -We tag the result with the mark of our first argument. - -Lifecycle of a `%plan` ----------------------- - - %plan - %+ cope (abut:(meow p.kas q.kas) cof r.kas) - |= [cof=cafe vax=vase] - (fine cof %noun vax) - -This is a direct request to compile a hood at a given beam with a heel -of the given path. We comply by calling `++abut` with the given -arguments and producing the vase with a mark of `%noun`. 
- -Lifecycle of a `%reef` ----------------------- - - %reef (fine cof %noun pit) - -This is one of the simplest silks. We simply produce our context, which -is zuse compiled against hoon. The mark is a `%noun`. - -Lifcycle of a `%ride` ---------------------- - - %ride - %+ cool |.(leaf/"ford: ride {<`@p`(mug kas)>}") - %+ cope $(kas q.kas) - |= [cof=cafe cay=cage] - %+ cope (maim cof q.cay p.kas) - |= [cof=cafe vax=vase] - (fine cof %noun vax) - -This slaps evaluates the given silk, then it slaps the result against -the given twig. Since we don't know what of what mark (if any) is the -result, we give it a mark of `%noun`. - -Lifecycle of a `%vale` ----------------------- - - %vale - %+ cool |.(leaf/"ford: vale {} {} {<`@p`(mug r.kas)>}") - %+ cope (lave cof p.kas q.kas r.kas) - |= [cof=cafe vax=vase] - (fine cof `cage`[p.kas vax]) - -This checks whether given data is of the given mark. If we don't have -the definition of the mark, we check the given ship for it. - -We call `++lave` to perform the check, producing a vase. We produce this -vase tagged with the given mark. - - ++ lave :: validate - |= [cof=cafe for=mark his=ship som=*] - ^- (bolt vase) - ((lake for [our %main [%da now]]) cof [%noun som]) - -This is a thinly-veiled wrapper over `++lake`. Note that, contrary to -documented opinion, we do not in fact check the other ship's definition -of a mark. This is likely a bug. - -At any rate, `++lake` coerces a noun into the correct type for a mark. - - ++ lake :: check/coerce - |= [for=mark bek=beak] - |= [cof=cafe sam=vase] - ^- (bolt vase) - %+ cool |.(leaf/"ford: check {<[for bek `@p`(mug q.sam)]>}") - ?: ?=(?(%gate %core %door %hoon %hook) for) - :: ~& [%lake-easy for bek] - (fine cof sam) - %+ cope (fang cof for bek) - |= [cof=cafe tux=vase] - =+ bob=(slot 6 tux) - ?: (~(nest ut p.bob) | p.sam) - (fine cof sam) - ?. (slab %grab p.tux) - (flaw cof [%leaf "ford: no grab: {<[for bek]>}"]~) - =+ gab=(slap tux [%cnzy %grab]) - ?. (slab %noun p.gab) - (flaw cof [%leaf "ford: no noun: {<[for bek]>}"]~) - %+ cope (maul cof (slap gab [%cnzy %noun]) [%noun q.sam]) - |= [cof=cafe pro=vase] - ?: =(+<.q.pro q.sam) - (fine cof (slot 6 pro)) - (flaw cof [%leaf "ford: invalid content: {<[for bek]>}"]~) - -This is going to coerce the sample into the correct type for the mark. -First, we push a line onto the stack trace saying that we're checking -the type. If the requested mark is a gate, core, door, hoon, or hook, -then we don't do any more type information than just saying it's a noun, -so we're done. - -Otherwise, we get the mark definition from our `/=main=/mar` directory -with `++fang`, which we'll describe below. - -We check to see if our sample type nests within the type of the sample -to the door. If so, then we're already of the correct type, so we're -done. - -Otherwise, we check to see if there's a `++grab` in the door, and a -`++noun` in the `++grab`. If not, there's no way we can translate to -this mark, so we fail. - -If we have everything we need, we slam our sample (typed as a noun) -against the `++noun` in `++grab`. If the sample of the door is the same -as our sample, then the check succeeded, so we produce the well-typed -sample of the door. Otherwise, we fail. - - ++ fang :: protocol door - |= [cof=cafe for=mark bek=beak] - ^- (bolt vase) - =+ pax=/door/[for]/mar - =+ ^= bem ^- beam - :_ pax - ?: =(p.bek our) bek - =+ oak=[our %main %da now] - ?. 
=(~ (ska %cy (tope [oak pax]))) oak - bek - (cope (fade cof %hook bem) abut:(meow bem ~)) - -A mark's definition is generally in -`/=main=/mar/[mark-name]/door/hook`. If we don't find it there, we look in -`/[given-beak]/mar/[mark-name]/door/hook`. -We parse the mark definition with `++fade` and assemble it with -`++abut:meow`. `++fade` is defined under the `%bake` silk. diff --git a/pub/doc/arvo/gall.md b/pub/doc/arvo/gall.md deleted file mode 100644 index c404b43e8c..0000000000 --- a/pub/doc/arvo/gall.md +++ /dev/null @@ -1,161 +0,0 @@ -
- -`%gall` -======= - -Our application server manager. - -It allows applications and vanes to send messages to applications and -subscribe to data streams. This requires `%gall` to be a sort of a -hypervisor. Messages coming into `%gall` are routed to the intended -application, and the response comes back along the same route. If the -intended target is on another ship, `%gall` will behind-the-scenes route -it through ames to the other ship to run. This provides an abstraction -where all apps on all ships are communicated with over the same -interface. - -`%gall` neither accepts events from unix nor produces effects. It exists -entirely for the benefit of other vanes and, in particular, -applications. Eyre exposes `%gall`'s interface over http, and ames does -the same over the ames network. `%gall` uses ford to compile and run the -applications. - -
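As a rough mental model only (this is not `%gall`'s code, and the names are invented), the routing idea looks like a dispatcher that hands a message to the named app and returns the positive or negative ack along the route it arrived on:

    # Conceptual sketch (not %gall's code) of the routing idea: a message is
    # handed to the named app's poke handler, and the ack (%nice or %mean)
    # travels back along the same route it came in on.
    class Hypervisor:
        def __init__(self):
            self.apps = {}                      # app name -> poke handler

        def register(self, name, poke):
            self.apps[name] = poke

        def mess(self, route, app, payload):
            handler = self.apps.get(app)
            try:
                if handler is None:
                    raise KeyError(f"no such app: {app}")
                handler(payload)
                return (route, 'nice')          # success ack, same route
            except Exception as err:
                return (route, 'mean', str(err))

    g = Hypervisor()
    g.register('talk', lambda msg: None)
    assert g.mess(route=('~zod', 'eyre', 42), app='talk', payload='hi')[1] == 'nice'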
- ------------------------------------------------------------------------- - -Cards -===== - -`%gall` accepts the following cards. The first three are the most -commonly used, while the others are primarily used internally. - -`%mess` -================ - -Sends a message to an app. This will result in a call to the app's -`++poke` arm. The response is exactly one of a `%nice` if the action -succeeded or a `%mean` if not. - ------------------------------------------------------------------------- - -`%show` -================ - -Subscribes to a stream from an app. This will result in a call to the -app's either `++peek` or `++peer` arm. The first response will always be -either a `%nice` or a `%mean`, indicating whether or not the -subscription was successful. After the first response, there will be -zero or more responses of either `%rush` or `%rust`, which communicate -either a differntial or full update to the data stream. There may be a -`%mean`, which indicates that the subscription has been canceled and no -more responses will be received along this stream. - ------------------------------------------------------------------------- - -`%nuke` -================ - -Unsubscribes the current duct from its stream. This receives a response -of either a `%nice` or a `%mean`. Note that a response of `%nice` does -not imply that the current duct was in fact subscribed to any stream. - ------------------------------------------------------------------------- - -`%init` -================ - -Initializes a ship's apps. This should be called exactly once for each -ship on the pier. This produces no moves in response. - ------------------------------------------------------------------------- - -`%sire` -================ - -Instantiates a child app. The app will be at path `[p parent-path]`, and -it will be an instance of the `q` app. The only response will be `%gone` -when the child dies. - ------------------------------------------------------------------------- - -`%rote` -================ - -Signifies a remote request from ames. `r` should be of type `rook`. This -how an app on a foreign ship may send a `%mess`, `%show`, or `%nuke` -card. Note that `%gall` automatically converts `%mess`, `%show`, and -`%nuke` into ames messages behind the scenes, so the only entity that -should use `%rote` and `%roth` is ames. Formally, the response is either -a `%nice` or a `%mean`, which ames uses to give a positive or negative -ack. A logical response comes by passing a `%roth` card. - ------------------------------------------------------------------------- - -`%roth` -================ - -Gives the response received from a remote request. `r` should be of type -`roon`. This is how an app responds to a foreign request with a `%rush`, -`%rust`, `%nice`, or `%mean`. The response is either a `%nice` or a -`%mean`. Even though we, as the proverb goes, "never ack an ack", we do -need to acknowledge these responses since they're really independent -one-way messages. - ------------------------------------------------------------------------- - -`%wipe` -================ - -Wipes the given app from memory. This is generally considered a hack, -but it is sometimes useful during development to wipe the state of an -app. We don't guarantee that this actually completely wipes the app. -Generally, you want to use a `%cide` card if you actually want to kill -an app. This gives no response. 
- ------------------------------------------------------------------------- - -`%cide` -================ - -Kills an app and all its children. Even though it's not technically a -part of `%gall`'s interface since it's not in `++kiss` and can't be -called from the outside, it's worth mentioning `%cide`, which may be -called from within `%gall` apps. It should call `++part` to allow the -app any last words. This gives no response. - ------------------------------------------------------------------------- - -Service Gates -============= - -[`++poke`]() -============ - -Handles incoming messages. Most commonly with an associated `%logo`. For -example `++poke-json` handles an incoming JSON request from `%eyre`. - -[`++peer`]() -============ - -Handles incoming subscriptions. - -[`++pull`]() -============ - -Handles dropping subscribers. - -[`++pour`]() -============ - -Handles responses to `%pass` moves. - -[`++park`]() -============ - -Save state on update. - -[`++prep`]() -============ - -Load state on update. diff --git a/pub/doc/arvo/gall/gall.md b/pub/doc/arvo/gall/gall.md deleted file mode 100644 index 254e67cb41..0000000000 --- a/pub/doc/arvo/gall/gall.md +++ /dev/null @@ -1,5 +0,0 @@ -Gall: Reference -=============== - -Gall: Commentary -================ diff --git a/pub/doc/arvo/time.md b/pub/doc/arvo/time.md deleted file mode 100644 index 40c016c5ec..0000000000 --- a/pub/doc/arvo/time.md +++ /dev/null @@ -1,21 +0,0 @@ -
- -`%time` ======= - -Our simple timer. - -It allows vanes and applications to set timer events, which are -managed in a simple priority queue. `%time` produces effects to start -the unix timer, and when the requested time passes, unix sends wake -events to `%time`, which `%time` routes back to the original sender. We don't -guarantee that a timer event will happen at exactly the time it was -set for, or even that it'll be particularly close. A timer event is a -request to not be woken until after the given time. - -`%eyre` uses `%time` for timing out sessions, and `%clay` uses `%time` -for keeping track of time-specified file requests. `%ames` should -probably use `%time` to keep track of things like network timeouts and -retry timing, but it currently uses its own alarm system. - -
diff --git a/pub/doc/arvo/util.md b/pub/doc/arvo/util.md deleted file mode 100644 index e28f8809fa..0000000000 --- a/pub/doc/arvo/util.md +++ /dev/null @@ -1,344 +0,0 @@ -
- -CLI Apps -======== - -Our simple command-line applications. - -You can find them in `/main/app`. - -
- ------------------------------------------------------------------------- - -### `:?begin` - -`~zod:dojo> :?begin [~ship-name [~valid-ticket-for-ship]]` - -Start a ship. `:?begin` collects all of the necessary information to -start an Urbit ship. Takes an option `[~ship-name]` or -`[~ship-name [~valid-ticket-for-ship]]` pair. - ------------------------------------------------------------------------- - -### `+cat` - -`~zod:dojo> +cat /path/to/file [...]` - -"cat" a file. `+cat` either prints a file, or concatenates and then -prints multiple files to the terminal. - - ~zod:dojo> +cat %/spec/nock/5/txt - > +cat %/spec/nock/5/txt - /~zod/home/~2015.6.29..22.33.04..fc76/spec/nock/5/txt - A noun is an atom or a cell. - … - ------------------------------------------------------------------------- - -### `|cp` - -`~zod:dojo> |cp /path/to/source /path/to/destination` - -Copy a file to a given location. - - ~zod:dojo> |cp %/spec/nock/5/txt %/try/6/txt - > |cp %/spec/nock/5/txt %/try/6/txt - + /~zod/home/2/try/6/txt - >= - ------------------------------------------------------------------------- - -### `grep` - -GONE - -`~zod:dojo> :grep 'literal'` -"grep" a file or standard input. Currently only supports a literal cord, -but will eventuall support regular expressions. - ------------------------------------------------------------------------- - -### `|hi` - -`~zod:dojo> |hi ~ship ["message"]` - -Send a ship a message which is empty by default, becoming their neighbor -in the process. Often used to ping ships to check connectivity. - - - ~zod:dojo> |hi ~doznec - > |hi ~doznec - ames: czar zod.urbit.org: ip .192.241.195.84 - >= - hi ~doznec succesful - ; ~doznec is your neighbor - ; ~doznec is your neighbor - -and on ~doznec - - ~doznec:dojo> - < ~zod: - ; ~zod is your neighbor - - -send a message - - ~zod:dojo> |hi ~doznec "say something" - >= - hi ~doznec succesful - -and on ~doznec - - < ~zod: say something - - ------------------------------------------------------------------------- - -### `:into` - -GONE - -`~zod:dojo> :into /path/to/file 'contents'` - -Write text to a file. If the specified file does not exist, create a -file by that name. If it does exist, replace its contents. - - ------------------------------------------------------------------------- - -### `|label` - -GONE? returns file not found - -`~zod:dojo> |label %path %label` - -"label". Add a label to a change number. - - ~zod:dojo> |label %try %zebra - = new /~zod/try/3 - ~zod:dojo> :ls /=try/zebra - readme - -Note that adding a label is part of the delta stream and creates a new -change number, `3`. - - ------------------------------------------------------------------------- - -### `+ls` - -`~zod:dojo> :+ls path/to/directory` - -"ls". List files at a path. Unlike "ls" in Unix, the current path `%` -must be explicitly given (you cannot call `+ls` with no arguments to -display the files at the current path). - - ~zod:dojo> +ls %try - > +ls %/try/ - readme/md - ------------------------------------------------------------------------- - -### `|mount` - -`~zod:dojo> |mount /path/to/directory/version %mount-point` - -Your files are not synced to unix by default. -To sync a subtree to unix, run `|mount /path/to/directory %mount-point`. -This will sync it into `/.` -If you want to sync your whole home desk into f0/home, for example, -run `|mount % %home` You can also [`|unmount`](). 
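For example, to mirror your whole home desk as described above (a minimal sketch; the exact acknowledgment printed may differ between builds):

    ~zod:dojo> |mount % %home
    >=
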
- - ~zod:dojo> |mount /~zod/base/0 %base - > |mount /~zod/base %base - >= - ------------------------------------------------------------------------- - -### `|mv` - -`~zod:dojo> |mv /path/to/source /path/to/destination` - -Move a file to a given location, creating a new revision of the source -that omits the moved file. - - ~zod:dojo> |mv %/try/6/txt %/try/7/txt - > |mv %/try/6/txt %/try/7/txt - + /~zod/home/3/try/7/txt - >= - ------------------------------------------------------------------------- - -### `|reload` - -`~zod:dojo> |reload %vane-name [...]` - -Reload the standard library (zuse) and/or arvo vanes. If zuse is -reloaded, vanes depending on the changes must be reloaded as well. For -example `|reload %zuse %ford` is necessary to make use of changes in -application code or the REPL. - -Possible values for %vane-name see [Overview](overview "overview"): - - ~zod:dojo> |reload %zuse - [%tang /~zod/home/~2015.6.29..23.50.29..134d/arvo/zuse ~hillyx-salhet] - > |reload %zuse - >= - ------------------------------------------------------------------------- - -### `|reset` - -`~zod:dojo> |reset` - -Reloads all vanes. See [`|reload`]() for reloading only or a specific vane. - - ~zod:dojo> |reset - [%vega-start /~zod/home/~2015.6.29..23.51.42..f335/arvo/hoon] - %vega-parsed - [%vega-compiled %163 163] - %hoon-load - [%tang /~zod/home/~2015.6.29..23.51.42..f335/arvo/zuse ~hillyx-salhet] - [%vane %a /~zod/home/~2015.6.29..23.51.42..f335/arvo/ames ~tilwyl-talren] - %ames-reload - [%vane %c /~zod/home/~2015.6.29..23.51.42..f335/arvo/clay ~molmur-panlus] - [%vane %d /~zod/home/~2015.6.29..23.51.42..f335/arvo/dill ~sicbet-miphes] - [%vane %e /~zod/home/~2015.6.29..23.51.42..f335/arvo/eyre ~solrux-sibnep] - [gub=30 hov=19 ged=18 ded=1 pox=1 ask=1 kes=1 ney=35 dop=1 liz=1 wup=1 sop=1 wix=1] - [%vane %f /~zod/home/~2015.6.29..23.51.42..f335/arvo/ford ~librem-sopseg] - [%vane %g /~zod/home/~2015.6.29..23.51.42..f335/arvo/gall ~sidsub-fasrev] - [%vane %t /~zod/home/~2015.6.29..23.51.42..f335/arvo/time ~ritwyn-lanrev] - > |reset - <<>> - >= - ------------------------------------------------------------------------- - -### `|rm` - -`~zod:dojo> |rm /path/to/source` - -Remove a file. - - ~zod:dojo> |rm %/try/7/txt - >= - ------------------------------------------------------------------------- - -### `+solid` - -`~zod:dojo> +solid` - -compiles a kernel into a new full urbit.pill - ------------------------------------------------------------------------- - -### `|sync` - -`~zod:dojo> |sync %source-desk ~hidduc-posmeg %target-desk` - -Sets up a subscription to the source desk on the target ship name to the -target desk on your ship. - ------------------------------------------------------------------------- - -### `+ticket` - -`~zod:dojo> +ticket ~ship-name` - -Creates a will for a ship. `+ticket` outputs the ticket for a Urbit -ship. Takes an option `[~ship-name]`. On destroyes this command creates -a yacht and takes the option \`[\~yacht-name-destroyer-name] - ------------------------------------------------------------------------- - -### `:thumb` - -GONE - -`~zod:dojo> :thumb ~ship-name` - -Show the ships information. Only works if you issued a [`:hi`] -[\`\~ship-name] beforehand. - -This command is not avaible since the switch from batz to `%gall`! 
- -Use this for the time beeing: - will: -`~zod/try=> ((hard (unit gcos)) .^(%a /=gcos=/~ship-name))` - raw will: -`~zod/try=> ((hard will) .^(%a /=will=/~ship-name))` - - ------------------------------------------------------------------------- - -### `|unmount` - -`~zod:dojo> |unmount /path/to/directory` - -Your files are not synced to unix by default. -To sync a subtree to unix, run `|mount`. -You can unmount with either `|unmount /path/to/directory` -or `|unmount %mount-point`. - - ~zod:dojo> |unmount %base - > |unmount %base - >= - ------------------------------------------------------------------------- - -### `|unsync` - -`~zod:dojo> |unsync %source-desk ~hidduc-posmeg %target-desk` - -Cancels the subscription to the source desk on the target ship name to -the target desk on your ship. - ------------------------------------------------------------------------- - -### `|verb` - -`~zod:dojo> |verb` - -Turn verbose arvo mode on/off. - -You'll see events, internal cards, and effects. - - [%unix p=%wake //temp] - [ %give - %t - %wake - ~[ - /c/tyme - /g/a/~zod/._~~.58_~~.shell_~~.terminal__/w/drug/~zod/main - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/began/u - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/._~~.2_~~.shell_~~.terminal__/u/to-gan - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/._~~.shell_~~.terminal__/u/child/2/main - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/terminal/u/txt - /d/term-mess - //term/1 - ] - ] - [ %give - %c - %writ - ~[ - /g/a/~zod/._~~.58_~~.shell_~~.terminal__/w/drug/~zod - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/began/u - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/._~~.2_~~.shell_~~.terminal__/u/to-gan - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/._~~.shell_~~.terminal__/u/child/2/main - /g/a/~harnyr-darlux-bitrux-litnum--falbec-tacsev-magdus-tobsyn/terminal/u/txt - /d/term-mess - //term/1 - ] - ] - ... - ------------------------------------------------------------------------- - -### `|ye` - -`~zod:dojo> |ye ["message"]` - -Send a message to all ships. Often used to announce a continuity breach. - ------------------------------------------------------------------------- diff --git a/pub/doc/hoon.mdy b/pub/doc/hoon.mdy deleted file mode 100644 index c0dfeabf98..0000000000 --- a/pub/doc/hoon.mdy +++ /dev/null @@ -1,28 +0,0 @@ ---- -sort: 1 ---- - -
- -hoon -==== - -hoon is our programming language. - -hoon is a strict, typed, functional language that compiles itself to -nock. The hoon compiler is 4000 lines of hoon. Adding standard -libraries, the self-compiling kernel is 8000 lines. The hoon compiler is -located towards the bottom of `/=main=/arvo/hoon.hoon`. The standard -library is split between `/=main=/arvo/hoon.hoon` and -`/=main=/arvo/zuse.hoon`. - -hoon has no particular familial relationship to other languages you may -know. It uses its own type inference algorithm and is as different from -Haskell as from Lisp. hoon syntax is also completely unfamiliar. hoon -uses ascii digraphs, which we call 'runes', instead of reserved words. - -
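For a first taste of rune syntax, here is a one-line dojo expression (illustrative only; the `~zod:dojo>` prompt is just an example ship): `=+` pushes a named value onto the subject, and the kernel gate `add` is then called on it.

    ~zod:dojo> =+(a=2 (add a 2))
    4
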
- ------------------------------------------------------------------------- - - diff --git a/pub/doc/interpreter.mdy b/pub/doc/interpreter.mdy deleted file mode 100644 index 4c0f3d951f..0000000000 --- a/pub/doc/interpreter.mdy +++ /dev/null @@ -1,16 +0,0 @@ ---- -sort: 3 ---- - -
- -Interpreter -========== - -The urbit interpreter and C code. - -
- ------------------------------------------------------------------------- - - diff --git a/pub/doc/interpreter/glossary.md b/pub/doc/interpreter/glossary.md deleted file mode 100644 index cd542e158d..0000000000 --- a/pub/doc/interpreter/glossary.md +++ /dev/null @@ -1,345 +0,0 @@ -
- -Glossary -======== - -### arm - -A key-value pair of a name ([++term]()) to an expression ([++foot()]). -Used primarily in [core]() construction. Arms can contain either -functions or data. You can think of them like named properties inside an -object. - ------------------------------------------------------------------------- - -### atom - -An atom is a natural number. More here..? - ------------------------------------------------------------------------- - -### axil - -An `[%axil p=base]` is a simple [`++tile`]() for a few basic icons: an -atom of any odor, a noun (`*`) , a cell of nouns (`^`), a loobean (`?`), -and null (`~`). - ------------------------------------------------------------------------- - -### battery - -[Cores], at the most basic level, are of the structure [battery -payload]. The battery consists of the code contained within a core. - ------------------------------------------------------------------------- - -### `%bark` - -A `[%bark p=term q=tile]` is a [`++tile`]() with a name wrapped around -it. Its [icon]() is a [`++face`](). The rune associated with a -[`%bark`]() is [`$=`](). - ------------------------------------------------------------------------- - -### bunt - -The bunt of a [`++tile`]() produces a [`++twig`]() that creates a blank -default example its [icon](). Bunting is like explicitly asking for the -default value of a type. Unlike in other languages, this always exists -in Hoon. See also [`$*`](). - ------------------------------------------------------------------------- - -### `%bush` - -a [`[%bush p=tile q=tile]`]() is a [`++tile`]() in which there are two -kinds of [nouns](): cells whose head is a cell (`++tile` p) and cells -whose head is an atom (`++tile` q). Its default value is the value of -`q`, and its icon is a [`++fork`]() - ------------------------------------------------------------------------- - -### cell - -A cell is an ordered pair of nouns. - ------------------------------------------------------------------------- - -### clam - -The clam of a [`++tile`]() is a [gate]() that accepts an arbitrary -[noun]() and always produces a member of the [icon]() of the `++tile`. -If the gate is passed a [sample]() that is a member of the icon, it will -produce that sample. If the gate is passed a noun outside of the domain -of the icon, it will produced the [bunt]() of the icon. You can think of -a clam as a validator function for an icon. To clam a `++tile` is to -produce its clam. See also: [`$,`](). SEE ALSO!! - ------------------------------------------------------------------------- - -### context - -In [gate]() construction, an arm is pulled from a [core]() and pushed -onto the subject creating a structure of [formula [sample context]], -where the context is the previous subject, commonly a core. In Hoon, the -whole kernel is typically included in your subject, so you can think of -context in hoon in a similar way to context in the traditional -functional programming sense. - ------------------------------------------------------------------------- - -### cons - -Cell constructor, similar to [cons in other functional -languages](http://en.wikipedia.org/wiki/Cons). Constructs a cell -containing two [`++twigs`]() into a twig that produces a cell of the -results of the two original sub-twigs. - ------------------------------------------------------------------------- - -### core - -At the Nock level, a core is any [subject]() that contains both code and -data, named battery and payload respectively. 
At the Hoon level, a core -is very similar to an object with named properties that can be either -functions or data. For more information, see the [`|` rune section]() of -the Hoon library. - ------------------------------------------------------------------------- - -### `%cube` - ------------------------------------------------------------------------- - -### door - -A door is a [core]() with a sample. Door are used.../you can think of -doors... - ------------------------------------------------------------------------- - -### dry - -In a dry computation, typechecking occurs at compile-time to ensure that -all inputs match its [sample]() [++tile](). The formal term for dry is -`%ash`. - ------------------------------------------------------------------------- - -### engine - -Engines are [core]()s that contain several [arm]()s that each perform -one of a related set of operations on the core's sample. For example, -there is a container engine for all of the set operations. You can think -of engines as objects with methods that modify its data. - ------------------------------------------------------------------------- - -### `%face` - ------------------------------------------------------------------------- - -### fern - -A `[%fern p=[i=tile t=(list tile)]]` is a [`++tile`]() for a non-empty -list of cases. Its icon is naturally a [`%fork`](). The programmer is -responsible for ensuring that the cases are actually orthogonal (unlike -with the structured `%fork`s, [`%bush`](), [`%kelp`]() and [`%reed`]). - ------------------------------------------------------------------------- - -### fishing - -To fish is to test if a [noun]() matches a specific `++tile`, using the -natural rune [`?=`](). Some languages call fishing "pattern matching". - ------------------------------------------------------------------------- - -### frond - -A frond is a case of a [kelp](), which is a [discriminated (or tagged) -union](http://en.wikipedia.org/wiki/Tagged_union). - ------------------------------------------------------------------------- - -### gate - -A [gate]() is a [core]() with one arm [`$`]() with a [payload]() that is -a cell of the form [[sample]() [context]()]. Gates are the closest thing -Hoon has to functions in the traditional sense. - ------------------------------------------------------------------------- - -### `%gold` - ------------------------------------------------------------------------- - -### herb - -An `[%herb p=twig]`.... - ------------------------------------------------------------------------- - -### icon - -The icon of a [`++tile`]() is the type associated with that `++tile`. A -`++tile` is a convenient way of specifying a type, which is its icon. -`++tile`s are used in a similar way to [type signatures]() for their -icons. - ------------------------------------------------------------------------- - -### `%iron` - -`%iron` is a variance type for [cores]() where their [sample]()s cannot -be read. You can think of can be thought of as similar to a private -function. - -Not quite sure about this one. - ------------------------------------------------------------------------- - -### `%kelp` - -a [`%kelp p=[i=line t=(list line)]`] is a [discriminated, or tagged, -union](http://en.wikipedia.org/wiki/Tagged_union). In Hoon, the head, -which is called the stem, must be a [`%leaf`](). The tail, which can be -anything, is the bulb. Cases of a kelp are known as [fronds](). 
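As a concrete sketch of a kelp, assuming the `$%` rune (which builds this kind of tagged union of leaf-stemmed cases), here is a hypothetical arm `gift` with two made-up fronds:

    ++  gift
      $%  [%nice ~]
          [%mean p=tang]
      ==
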
- ------------------------------------------------------------------------- - -### kick - -To pull the empty name `$` on a core is to kick it. You can think of -kicking like calling a function with its default arguments. - ------------------------------------------------------------------------- - -### noun - -A noun is an [atom]() or a [cell](). Everything in Hoon is a noun. - ------------------------------------------------------------------------- - -### `%$` buc - -`%$`, or `$` for short, is the empty name in Hoon. - ------------------------------------------------------------------------- - -### leg - -If the result of [pulling]() something from `x` is a subtree, then it is -a leg. - -More here? Existing doc isn't quite clear here.. - ------------------------------------------------------------------------- - -### `%lead` - ------------------------------------------------------------------------- - -### `%leaf` - -A `%leaf` is a [`++tile`]() consisting of an atomic constant of value -`q` and odor `p`. Its icon is a [`%cube`](). The syntax for a leaf is -the same as the syntax for a [`++twig`](), except that % is never -required to generate a cube. For instance, as a twig, 7 has a type of -[%atom %ud]; %7 has a type of [%cube 7 [%atom %ud]]. But the icon of the -leaf 7 is, again, [%cube 7 [%atom %ud]]. - -Copied the bottom half from existing doc. Not sure about this one... - ------------------------------------------------------------------------- - -### loobean - ------------------------------------------------------------------------- - -### payload - -[Cores](), at the most basic level, are of the structure [battery -payload]. The payload consists of the data contained within a core. You -can think of the payload as similar to the data of an object. - ------------------------------------------------------------------------- - -### pull - -To access a [wing]() or [limb]() in a [core]() is to pull it. For -instance, when we write `a.b.x` (a within b from x), we are pulling the -wing `a.b` from `x`. - ------------------------------------------------------------------------- - -### `%reed` - -A `[%reed p=tile q=tile]` is a [`++tile`]() whose [icon]() contains two -kinds of nouns: atoms of `++tile` `p` and cells of `++tile` `q`. The -rune associated with reeds is [`$|`](). - ------------------------------------------------------------------------- - -### sample - -In [gate]() construction, an arm is pulled from a [core]() and pushed -onto the subject creating a structure of [formula [sample context]], -where the sample represents the gate's inputs. All gates are constructed -with a default sample value. Thus, when we call a gate with arguments, -we are actually replacing its sample. - ------------------------------------------------------------------------- - -### slam - -To pull the empty name `$` on a [gate]() `g` with its [sample]() -replaced by a given input `a` is to slam `g` with `a`. You can think of -slamming like passing input parameters to a function that's being -called. - ------------------------------------------------------------------------- - -### subject - -All Hoon expressions a parsed into abstract syntax trees, which in Hoon -are called [++twig]()s. Twigs are [nouns]() that are converted into Nock -expressions, which are all of the basic form [subject formula], where -the subject is the data and the formula is the program. Thus, in both -Hoon and Nock, subject can refer to any piece of data that is being -operated on by a formula. 
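To make "slam" concrete: calling a gate in the dojo replaces its sample and then pulls `$`. Here the kernel gate `++add` is slammed with the sample `[2 3]` (the prompt is illustrative):

    ~zod:dojo> (add 2 3)
    5
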
- ------------------------------------------------------------------------- - -### `++tile` - -A `++tile` is a convenient way of specifying a type, which is its icon. -`++tile`s are used in a similar way to [type signatures]() for their -icons. - -SOMETHING ABOUT THE DIFFERENCE BETWEEN TWIG AND TILE AUTOCONS. - ------------------------------------------------------------------------- - -### weed - -A `[%weed p=twig]` - ------------------------------------------------------------------------- - -### wet - -In wet computations, the product type is checked to be the same as the -input type, rather than the [sample]() [tile](). The formal term for wet -is `%elm`. - ------------------------------------------------------------------------- - -### wing - -A wing is a list of limbs. For example, when we [pull] `a.b` from `x`, -`a.b` is a wing. `a` and `b` individually are both [limbs](). - ------------------------------------------------------------------------- - -
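A small dojo sketch of wings and pulling, with made-up faces `x`, `a`, `b`, `c`, and `d`: the wing `c.b.x` resolves `c` within `b` within `x`.

    ~zod:dojo> =+(x=[a=1 b=[c=2 d=3]] c.b.x)
    2
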
diff --git a/pub/doc/interpreter/vere.md b/pub/doc/interpreter/vere.md deleted file mode 100644 index f7b7d16055..0000000000 --- a/pub/doc/interpreter/vere.md +++ /dev/null @@ -1,183 +0,0 @@ -
- -vere -==== - -vere is the Urbit virtual machine. - - bin/vere -c ship - ------------------------------------------------------------------------- - -Options -------- - -### `-b` - -Batch create - ------------------------------------------------------------------------- - -### `-c` - -Create - -Creates a new pier. Takes a folder name, such as `pier`. - - bin/vere -c pier - ------------------------------------------------------------------------- - -### `-d` - -Daemon - ------------------------------------------------------------------------- - -### `-D` - -Dry compute - -dry compute the north and/or south events - ------------------------------------------------------------------------- - -### `-f` - -Fuzz testing - ------------------------------------------------------------------------- - -### `-k` - -Kernel version - ------------------------------------------------------------------------- - -### `-l` - -Raft port - ------------------------------------------------------------------------- - -### `-L` - -Localhost. - -Routes all networking over `0.0.0.0` and checks the keys. - - bin/vere -L -I ~del -c fz - ------------------------------------------------------------------------- - -### `-M` - -Memory madness - ------------------------------------------------------------------------- - -### `-n` - -Unix hostname - ------------------------------------------------------------------------- - -### `-p` - -Specify the [`%ames`](/doc/arvo/ames) udp listening port. - - bin/vere -p 42665 - -It can sometimes help if you get a port pointed at you and run vere with -`-p` to specify the [`%ames`](/doc/arvo/ames) udp listening port. VMs -and [docker](http://www.docker.com/) containers and the like tend to put -up some pretty effective barriers to -[NAT](http://en.wikipedia.org/wiki/Network_address_translation) [hole -punching](http://en.wikipedia.org/wiki/TCP_hole_punching). - ------------------------------------------------------------------------- - -### `-P` - -Profile - ------------------------------------------------------------------------- - -### `-q` - -Quite - -Inverse of [`-v`](#-v) - -See also: [`:verb`](/doc/arvo/util#verb)) - ------------------------------------------------------------------------- - -### `-r` - -Raft flotilla - -Also needs the [`-l`](#-l) option set. - ------------------------------------------------------------------------- - -### `-F` - -Fake - -Routes all networking over `0.0.0.0` and doesn't check any keys. This -allows you to start any carrier. - - bin/vere -F -I ~zod -c zod - -You get an isolated network with just yourself but you can [`:ticket`]() -other ships or start other ships or start other carriers. - ------------------------------------------------------------------------- - -### `-I` - -Imperial - -Takes a carrier name, such as `~zod`. - - bin/vere -F -I ~zod -c zod - ------------------------------------------------------------------------- - -### `-v` - -Verbose - -Inverse of [`-q`](#-q) - -See also: [`:verb`](reference/arvo/util.md#verb). - - bin/vere -v mypier - ------------------------------------------------------------------------- - -### `-X` - -Skip last event - - bin/vere -Xwtf mypier - ------------------------------------------------------------------------- - -Tips -==== - -Inability to mmap 2Gb with `MAP_FIXED` --------------------------------------- - -It's probably because of -[ASLR](http://en.wikipedia.org/wiki/Address_space_layout_randomization) -(some shared library got its data mapped in the middle of your address -space). 
If so, applying - - bash setarch `uname -m` -R ./bin/vere - -should help. - -
diff --git a/pub/doc/nock.mdy b/pub/doc/nock.mdy deleted file mode 100644 index c477a997f1..0000000000 --- a/pub/doc/nock.mdy +++ /dev/null @@ -1,41 +0,0 @@ ---- -sort: 0 ---- - -
- -nock -==== - -nock is our machine code. - -nock is a homoiconic combinator algebra, not much fancier than SKI -combinators. The spec fits on a T-shirt and gzips to 340 bytes. - -Think of nock as a kind of functional assembly language. It's not like -assembly language in that it's directly executed by the hardware. It is -like assembly language in that: - -- Everything in Urbit executes as nock. -- You wouldn't want to program directly in nock. -- Learning to program directly in nock is a great way to start - understanding urbit from the ground up. - -Just as Unix runs C programs by compiling them to assembler, Urbit runs -Hoon programs by compiling them to nock. You could try to learn Hoon -without learning nock. But just as C is a thin wrapper over the physical -CPU, Hoon is a thin wrapper over the nock virtual machine. It's a tall -stack made of thin layers, which is much easier to learn a layer at a -time. - -And unlike most fundamental theories of computing, there's really -nothing smart or interesting about nock. Of course, in a strictly formal -sense, all of computing is math. But that doesn't mean it needs to feel -like math. nock is a simple mechanical device and it's meant to feel -that way. - -
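For flavor, the dojo can evaluate raw nock directly with the `.*` rune; the formula `[0 1]` simply returns the subject (prompt and subject are illustrative):

    ~zod:dojo> .*(42 [0 1])
    42
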
- ------------------------------------------------------------------------- - - diff --git a/pub/doc/tools.mdy b/pub/doc/tools.mdy deleted file mode 100644 index fe245b997c..0000000000 --- a/pub/doc/tools.mdy +++ /dev/null @@ -1,10 +0,0 @@ ---- -sort: 4 ---- - -Tools -==== - -User-level tools and utilities. - - diff --git a/pub/doc/tools/clay.md b/pub/doc/tools/clay.md deleted file mode 100644 index 0c81ff0135..0000000000 --- a/pub/doc/tools/clay.md +++ /dev/null @@ -1,209 +0,0 @@ -`%clay` -======== - -`%clay` filesystem utilities. - -## Paths - -### Structure - -Urbit paths have a very specific structure. First, since the clay -filesystem has a global namespace, the first element in any path -is the particular urbit whose filesystem you are trying to -access. - -The second element specifies which desk you wish to access on -that urbit. Desks are independent branches (in the -revision-control sense) of their filesystem. - -The third element specifies the revision number for that -desk. The remainder of the path is the path to the file. - -Thus, a path in clay is: - -`/urbit/desk/revision/path`. - -For example, to get revision 5 of `/try/readme/md` off the `home` -desk on `~sampel-sipnym`, use: - -`/~sampel-sipnym/home/5/try/readme/md`. - -### Shortcuts - -`%` refers to the current working -directory. `%%` refers to our parent, `%%%` refers to our -grandparent, and so forth. - -For example: - - XX TBD - - -From the other direction, inserting a `=` into a path copies the -corresponding element from the current path into the path that -you're trying to access. - -For example, if the current path is referencing our ship at the -current time, to reference `/try/readme`, use: - -`/===try/readme`. - - -### Accessing commits - -There are three ways to refer to particular commits in the -revision history. First, one can use the revision number. -Second, one can use any absolute time between the one numbered -commit and the next (inclusive of the first, exclusive of the -second). Thirdly, every desk has a map of labels to revision -numbers. These labels may be used to refer to specific commits. - - -## `ls` - -`+ls /path` gives a directory listing at a path - -## `cat` - -`+cat /path` -prints out the file at the given path. - -## `mount` - -It's often useful to "mount" the clay filesystem to unix, so that -you can interact with it with the traditional unix tools. The -syntax to do this is as follows: - - |mount /path [%mount-point] - -This mirrors the desk out to unix in at the path -` `. If you don't supply a -`%mount-point`, we use the last element in the path. Thus, if -you mount `%/pub/doc`, it'll by default put it in `doc`. - -*The mount point is monitored Dropbox-style, so every change you -make to the file in unix is automatically commited to clay.* - -You can unmount by specifying either the path or the mount point. - - |unmount /path - |unmount %mount-point - -## `merge` - -Often, it's useful to be able to merge a desk into another desk. -The other desk does not, of course, need to be on the same urbit: -for example, the standard way to distribute an app is to put it -on a desk and let other people merge it into their own urbit. - -The syntax is as follows: - - |merge %to-desk ~from-urbit %from-desk [%strategy] - -There are seven different merge strategies. Throughout our -discussion, we'll say that the merge is from Alice's desk to -Bob's. - -### Native strategies - -A `%init` merge should be used iff it's the first commit to a -desk. 
The head of Alice's desk is used as the number 1 commit to -Bob's desk. Obviously, the ancestry remains intact when -traversing the parentage of the commit, even though previous -commits are not numbered for Bob's desk. - -A `%this` merge means to keep what's in Bob's desk, but join the -ancestry. Thus, the new commit has the head of each desk as -parents, but the data is exactly what's in Bob's desk. For those -following along in git, this is the 'ours' merge strategy, not -the '--ours' option to the 'recursive' merge strategy. In other -words, even if Alice makes a change that does not conflict with -Bob, we throw it away. - -A `%that` merge means to take what's in Alice's desk, but join -the ancestry. This is the reverse of `%this`. - -A `%fine` merge is a "fast-forward" merge. This succeeds iff one -head is in the ancestry of the other. In this case, we use the -descendant as our new head. - -For `%meet`, `%mate`, and `%meld` merges, we first find the most -recent common ancestor to use as our merge base. If we have no -common ancestors, then we fail. If we have multiple most -recent common ancestors, then we have a criss-cross situation, -which should be handled delicately. At present, we don't handle -this kind of situation, but something akin to git's 'recursive' -strategy should be implemented in the future. - -There's a functional inclusion ordering on `%fine`, `%meet`, -`%mate`, and `%meld` such that if an earlier strategy would have -succeeded, then every later strategy will produce the same -result. Put another way, every earlier strategy is the same as -every later strategy except with a restricted domain. - -A `%meet` merge only succeeds if the changes from the merge base -to Alice's head (hereafter, "Alice's changes") are in different -files than Bob's changes. In this case, the parents are both -Alice's and Bob's heads, and the data is the merge base plus -Alice's changed files plus Bob's changed files. - -A `%mate` merge attempts to merge changes to the same file when -both Alice and Bob change it. If the merge is clean, we use it; -otherwise, we fail. A merge between different types of changes -- -for example, deleting a file vs changing it -- is always a -conflict. If we succeed, the parents are both Alice's and Bob's -heads, and the data is the merge base plus Alice's changed files -plus Bob's changed files plus the merged files. - -A `%meld` merge will succeed even if there are conflicts. If -there are conflicts in a file, then we use the merge base's -version of that file, and we produce a set of files with -conflicts. The parents are both Alice's and Bob's heads, and the -data is the merge base plus Alice's changed files plus Bob's -changed files plus the successfully merged files plus the merge -base's version of the conflicting files. - -### Metastrategies - -There's also a meta-strategy `%auto`, which is the most common. -If no strategy is supplied, then `%auto` is assumed. `%auto` -checks to see if Bob's desk exists, and if it doesn't we use a -`%init` merge. Otherwise, we progressively try `%fine`, -`%meet`, and `%mate` until one succeeds. - -If none succeed, we merge Bob's desk into a scratch desk. Then, -we merge Alice's desk into the scratch desk with the `%meld` -option to force the merge. For each file in the produced set of -conflicting files, we call the `++mash` function for the -appropriate mark, which annotates the conflicts if we know how. 
- -Finally, we display a message to the user informing them of the -scratch desk's existence, which files have annotated conflicts, -and which files have unannotated conflicts. When the user has -resolved the conflicts, they can merge the scratch desk back into -Bob's desk. This will be a `%fine` merge since Bob's head is in -the ancestry of the scratch desk. - -## Autosync - -Since clay is reactive, it's possible for changes to the -filesystem to cause various actions. An important use of this is -in enabling "autosync". When a desk is synced to another, any -changes to the first desk are automatically applied to the -second. - -This isn't simply mirroring, since the local desk might have -changes of its own. We use the full merge capabilities of clay -to try to make the merge clean. If there are conflicts, it'll -notify you and let you resolve them. - -There can be complex sync flows, some of which are useful. -Often, many urbits will be synced to some upstream desk that is -trusted to provide updates. Sometimes, it's useful to sync two -desks to each other, so that changes to one or the other are -mirrored. - -The syntax for syncing and unsyncing desks is as follows: - - |sync %to-desk ~from-urbit %from-desk - |unsync %to-desk ~from-urbit %from-desk \ No newline at end of file diff --git a/pub/doc/tools/moon.md b/pub/doc/tools/moon.md deleted file mode 100644 index 53ba9486fa..0000000000 --- a/pub/doc/tools/moon.md +++ /dev/null @@ -1,95 +0,0 @@ -`+ticket` -=================== - -Connecting your planet to moons. - -The Urbit namespace is designed for you to have a planet in the -cloud and your other devices (pc, laptop, phone) are moons. Once you -have a moon, you can setup 2 way sync of various desks, much like -Dropbox or a reactive git. If you `|mount` the desks, you can -have synchronization between directories on your moon and planet's -systems. - -Creating your moon ------------------- - -Each planet can issue 2^32 moons. Moon names look like two planet -names glued together and always end with the signing planet name. So -for the planet `~matfeb-sablud` one could generate a moon `~talsur- -todres-matfeb-sablud`. - -### On your planet - -All of your moons will sync from your `%kids` desk. Your planet -should come with one by default, but let's check to see: - -``` -> +ls /=kids= -``` - -You should get a list of directories. If you just get `~` you can -set one up with the following command: - -``` ->|sync %kids our %base -``` - -To generate a random moon you can run the following in the `dojo`: - -``` -+ticket =+(`@p`(cat 5 our (mod eny (pow 2 32))) ~&(- -)) -``` - -The output will print your moon name above the line with the -command. It will look something like this: - -``` -~some-moon-some-planet -> +ticket =+(`@p`(cat 5 our (mod eny (pow 2 32))) ~&(- -)) -~some-ticket -``` - -You'll use both of these values to create your moon in the next -step. - -### On your PC/laptop - -``` -> bin/urbit -w some-moon-some-planet -t some-ticket -``` - -Wait for it to boot up and there you have it. You've created your -moon which is tied to your planet. - -When you first boot your moon you will have a shell that is connected -to your planet. This might or might not be what you want. To get a -local shell, type `^d` at the dojo prompt, which should drop you into -the task manager. Then type `*dojo` to get a shell connected to your -moon. 
- -Setting up 2-way sync --------------------- - -To create a two-way synchronized desk between your moon and your -planet, simply do the following: - -On the moon: - -``` -> |sync %home ~matfeb-sablud %home -> |mount /=home= %home :: So you can see the files from Unix -``` - -On the planet: - -``` -> |sync %home ~hobdyl-pontyr-matfeb-sablud %home -``` - -The initial sync takes a little while, but after that you should be -able to create and edit files and have them sync up on the paired -system, much like your own personal Dropbox. - ---- - -[This guide brought to you by `~matfeb-sablud`] \ No newline at end of file diff --git a/pub/docs.mdy b/pub/docs.mdy new file mode 100644 index 0000000000..9b9292b88f --- /dev/null +++ b/pub/docs.mdy @@ -0,0 +1,24 @@ +--- +logo: black +anchor: none +layout: no-anchor +--- +
+ +# Urbit documentation + +The Urbit doc is divided into three parts: [user doc](docs/user), +[developer doc](docs/dev), and [theory](docs/theory) (whitepaper, essays, +videos, etc). + +If you want to try Urbit, start with the user doc. If you want +to learn about Urbit, try the theory. Or just start with the +user doc; it doesn't assume any prior knowledge. + +The most fun thing to do with Urbit is code, but the developer +doc remains under construction. Sorry. We'll have more soon. + + + +
diff --git a/pub/docs/dev.mdy b/pub/docs/dev.mdy new file mode 100644 index 0000000000..506fb3c7d2 --- /dev/null +++ b/pub/docs/dev.mdy @@ -0,0 +1,27 @@ +--- +logo: black +title: Developer doc +sort: 2 +--- +
+ +# Developer documentation + +Urbit has three programming layers: Nock (combinator nano-VM), +Hoon (strict functional language), and Arvo (functional OS). + +To code in Urbit, the least you need to learn is Hoon, plus a +little bit of Arvo. Nock is a sort of functional assembly +language -- you don't need to know it, but it's useful to. +Nock is also the easiest thing in the world to learn. + +You can program for Arvo without knowing much about Arvo +internals, but again it helps. But you need to know Hoon. +Don't worry, it's easier than it looks. + +Alas, the developer doc is still under construction. We'll have +more soon. + + + +
diff --git a/pub/docs/dev/arvo.mdy b/pub/docs/dev/arvo.mdy new file mode 100644 index 0000000000..e9ab0ff74c --- /dev/null +++ b/pub/docs/dev/arvo.mdy @@ -0,0 +1,14 @@ +--- +logo: black +title: Arvo +sort: 3 +--- +
+ +# Arvo + +Arvo is a functional operating system. + +Watch this space for actual documentation. + +
diff --git a/pub/docs/dev/client.mdy b/pub/docs/dev/client.mdy new file mode 100644 index 0000000000..120519b6fb --- /dev/null +++ b/pub/docs/dev/client.mdy @@ -0,0 +1,11 @@ +--- +logo: black +title: Frontend tools +sort: 4 +hide: true +--- + +Frontend tools +============== + + \ No newline at end of file diff --git a/pub/doc/tools/tree.md b/pub/docs/dev/client/tree.md similarity index 71% rename from pub/doc/tools/tree.md rename to pub/docs/dev/client/tree.md index 38d6643d35..7234f5b305 100644 --- a/pub/doc/tools/tree.md +++ b/pub/docs/dev/client/tree.md @@ -1,8 +1,12 @@ # `:tree` -`:tree` static file hosting internals. +`:tree` is the web filesystem interface. Odds are this file has been rendered for you by `:tree`. -`:tree` is a single-page app that uses a backend in `/home/tree` to load contents from `%clay` as the user navigates around as `%json`. The frontend lives in `/home/pub/tree` and is a fairly straightforward [React](https://facebook.github.io/react/) / [Flux](https://facebook.github.io/flux/) app. +`:tree` is a single-page app that uses a backend in `/home/tree` to load +contents from `%clay` as the user navigates around as `%json`. The frontend +lives in `/home/pub/tree` and is a fairly straightforward +[React](https://facebook.github.io/react/) / +[Flux](https://facebook.github.io/flux/) app. ## Frontend @@ -14,9 +18,14 @@ The CSS is written in [Stylus](https://learnboost.github.io/stylus/). The main e ### JS -The JS is written in [CoffeeScript](http://coffeescript.org/) and packaged with [Browserify](http://browserify.org/). The main entry point is `main.coffee` and is compiled with `browserify -t coffeeify main.coffee > main.js`. You'll need to `npm install` first. +The JS is written in [CoffeeScript](http://coffeescript.org/) and packaged with +[Browserify](http://browserify.org/). The main entry point is `main.coffee` and +is compiled with `browserify -t coffeeify main.coffee > main.js`. You'll need +to `npm install` first. -Each page is loaded as JSON and then rendered using React on the page. This allows us to write JSX in our markdown to implement simple components. Check out `/home/pub/tree/src/js/components` to see the component library. +Each page is loaded as JSON and then rendered using React on the page. This +allows us to write JSX in our markdown to implement simple components. Check +out `/home/pub/tree/src/js/components` to see the component library. You'll notice that some of these doc pages use things like `` in the raw markdown files. diff --git a/pub/docs/dev/contributing.mdy b/pub/docs/dev/contributing.mdy new file mode 100644 index 0000000000..3c7a6858ba --- /dev/null +++ b/pub/docs/dev/contributing.mdy @@ -0,0 +1,213 @@ +--- +title: Contributing +sort: 6 +--- + +# Contributing to urbit + +Thank you for your interest in contributing to urbit. + +## Development practice + +You may have an identity on the live network, but doing all your +development on the live network would be cumbersome and unnecessary. +Standard practice in urbit development is to work on a fake `~zod`. A +fake `~zod` will get its initial files from the `urb/zod/` directory +rather than trying to sync them over the network, which is invaluable +for working in Hoon. Also, a fake `~zod` or any fake urbit instances you +start do not talk to the live network, but to a fake network that exists +only on your computer. 
+ +To start a fake `~zod`, the command is: + + bin/urbit -F -I zod -c [pier directory] + +To resume one that was already created, just as on the live network, +remove `-c` (but leave the rest of the options there). `-F` uses the +fake network, and `-I` starts an "imperial" instance - that is, an 8-bit +galaxy. + +## Kernel development + +Working on either C or non-kernel Hoon should not bring any surprises, +but the Hoon kernel (anything under `urb/zod/arvo/`) is bootstrapped +from `urbit.pill` in `urb/`, and must be manually recompiled if any +changes are made. The command to manually recompile the kernel and +install the new kernel is `|reset` in `dojo`. This rebuilds from the +`arvo` directory in the `home` desk in `%clay`. Currently, `|reset` +does not reload apps like `dojo` itself, which will still reference the +old kernel. To force them to reload, make a trivial edit to their main +source file (under the `ape` directory) in `%clay`. + +If you do any kernel development, be sure to read the section below about +pills. + +## Git practice + +Since we use the GitHub issue tracker, it is helpful to contribute via a +GitHub pull request. If you already know what you are doing, skip down +to the Style section. + +Start by cloning the repository on your work machine: + + git clone https://github.com/urbit/urbit + +And, additionally, fork the repository on GitHub by clicking the "Fork" +button. Add your fork as a remote: + + git remote add [username] https://github.com/[username]/urbit + +and set it as the default remote to push to: + + git config --local remote.pushDefault [username] + +This is good practice for any project that uses git. You will pull +upstream branches from urbit/urbit and push to your personal urbit fork +by default. + +Next, check out `test`, which is the mainline development branch, and +base a new branch on it to do your work on: + + git checkout test + git checkout -b [branch name] + +Now you are free to do your work on this branch. When finished, you may +want to clean up your commits: + + git rebase -i test + +Then you can push to your public fork with `git push` and make a pull +request via the GitHub UI. Make sure you request to merge your branch +into `test`, not `master`. + +After your changes are merged upstream, you can delete your branch (via +github UI or `git push :[branch]` remotely, and with `git branch -d` +locally). + +## Style + +The urbit project uses two-space indentation and avoids tab characters. +In C code, it should not be too difficult to mimic the style of the code +around you, which is just fairly standard K&R with braces on every +compound statement. One thing to watch out for is top-level sections in +source files that are denoted by comments and are actually indented one +level. + +Hoon will be a less familiar language to many contributors. Some of our +less obvious stylistic rules are: + +- Keep your source files 80 characters or less wide. Many urbit + developers use 80 character terminals/tmux panes/&c. +- Tab characters are actually a syntax error, so be extra sure your + editor is not inserting any. Trailing whitespace is *usually* not a + syntax error, but avoiding it is encouraged. +- The kernel convention is that line comments start at column 57 with + the `::` followed by 2 spaces. This leaves 20 characters for the + comment. Outside the kernel, things are less strict. +- Tall arms within a core are conventionally separated by empty comments + (just `::`) at the same indentation level as the initial `++` or `+-`. 
+ The last arm in a core is not followed by an empty comment, because it + is visually closed by the `--` that closes the core. The empty comment + is also sometimes omitted in data structure definitions. + +## The kernel and pills + +urbit bootstraps itself using a binary blob called `urbit.pill`, which +we do indeed keep in version control. This creates some special +requirements. If you are not changing anything in the kernel (everything +under `urb/zod/arvo/`) then you can skim this section (please do not +skip it entirely, though). If you *are* working there, then this +section is critically important! + +The procedure for creating `urbit.pill` is often called "soliding". It +is somewhat similar to `|reset`, but instead of replacing your running +kernel, it writes the compiled kernel to a file. The command to solid +is, on a fakezod: + + .urbit/pill +solid + +When the compilation finishes, your `urbit.pill` will be found in the +`[pier]/.urb/put/` directory. Copy it into `urb/` and add it to your +commit. + +The requirement here is that every commit that changes the kernel must +come with an `urbit.pill` built from the same code in `urb/zod/arvo/` +for that commit. (Only changing the actual Hoon code counts, so a change +to a jet with no corresponding Hoon change does not require a new pill.) +This is so that checking out an arbitrary revision and starting up a +fakezod actually works as expected. However you do this is fine, but I +like to do it as part of my committing process - just before `git +commit`, I fire up a new fakezod. This will use the previous +`urbit.pill`, but the kernel code in `%clay` will be copied from +`urb/zod/arvo/`, so `+solid` will compile it. Then I copy `urbit.pill` +into `urb/` and make my commit. + +If you rebase or interactive rebase your commits, be sure to preserve +this property on all the commits you end up with. If multiple people +were collaborating on your branch, you may end up with conflicts in +`urbit.pill` and have to merge the branch into itself to resolve them. +Just do the same procedure to create a new, merged pill before +committing the merge. Otherwise, just make sure to use the correct +`urbit.pill` for each commit. + +## Debug urbit with `gdb` + +Follow the build instructions in README.md but run `make` with argument `DEBUG=yes`: + +(If you've already built urbit first run `make clean`.) + + make DEBUG=yes + +Run `gdb`, while loading `bin/urbit` and its symbol table: + + gdb bin/urbit + +Set a breakpoint on `main()` (optional): + + break main + +Run your urbit comet `mycomet`: + + run mycomet + +Continue from the breakpoint on `main()`: + + continue + +## What to work on + +If you are not thinking of contributing with a specific goal in mind, +the GitHub issue tracker is the first place you should look for ideas. +Issues are tagged with a priority and a difficulty. A good place to +start is on either a low-difficulty issue or a low-priority issue. +Higher priority issues are likely to be assigned to someone - if this is +the case, then contacting that person to coordinate before starting to +work is probably a good idea. + +There is also a "help wanted" tag for things that we are especially +eager to have outside contributions on. Check here first! + +## Staying in touch + +The urbit developers communicate on urbit itself. Joining the +`~doznec/urbit-meta` channel on `talk` is highly recommended. +Subscribing to `urbit-dev` on Google Groups is also recommended, since +this is where continuity breach notifications are sent. 
+ +You can also contact one of the following people: + +- Philip Monk + + email: philip.monk@tlon.io + + urbit: `~wictuc-folrex` + + GitHub: [@philipcmonk](https://github.com/philipcmonk/) + +- Raymond Pasco + + email: ray@the.ug + + urbit: `~ramtev-wisbyt` + + GitHub: [@juped](https://github.com/juped/) \ No newline at end of file diff --git a/pub/docs/dev/hoon.mdy b/pub/docs/dev/hoon.mdy new file mode 100644 index 0000000000..d1afa0199f --- /dev/null +++ b/pub/docs/dev/hoon.mdy @@ -0,0 +1,15 @@ +--- +logo: black +title: Hoon +sort: 2 +--- + +
+ +# Hoon + +Hoon is a strict, typed, pure functional language. + +Watch this space for actual documentation. + +
diff --git a/pub/doc/hoon/library.md b/pub/docs/dev/hoon/library.md similarity index 100% rename from pub/doc/hoon/library.md rename to pub/docs/dev/hoon/library.md diff --git a/pub/doc/hoon/library/0.md b/pub/docs/dev/hoon/library/0.md similarity index 100% rename from pub/doc/hoon/library/0.md rename to pub/docs/dev/hoon/library/0.md diff --git a/pub/doc/hoon/library/1.md b/pub/docs/dev/hoon/library/1.md similarity index 100% rename from pub/doc/hoon/library/1.md rename to pub/docs/dev/hoon/library/1.md diff --git a/pub/doc/hoon/library/2a.md b/pub/docs/dev/hoon/library/2a.md similarity index 100% rename from pub/doc/hoon/library/2a.md rename to pub/docs/dev/hoon/library/2a.md diff --git a/pub/doc/hoon/library/2b.md b/pub/docs/dev/hoon/library/2b.md similarity index 100% rename from pub/doc/hoon/library/2b.md rename to pub/docs/dev/hoon/library/2b.md diff --git a/pub/doc/hoon/library/2c.md b/pub/docs/dev/hoon/library/2c.md similarity index 100% rename from pub/doc/hoon/library/2c.md rename to pub/docs/dev/hoon/library/2c.md diff --git a/pub/doc/hoon/library/2da.md b/pub/docs/dev/hoon/library/2da.md similarity index 100% rename from pub/doc/hoon/library/2da.md rename to pub/docs/dev/hoon/library/2da.md diff --git a/pub/doc/hoon/library/2db.md b/pub/docs/dev/hoon/library/2db.md similarity index 100% rename from pub/doc/hoon/library/2db.md rename to pub/docs/dev/hoon/library/2db.md diff --git a/pub/doc/hoon/library/2dc.md b/pub/docs/dev/hoon/library/2dc.md similarity index 100% rename from pub/doc/hoon/library/2dc.md rename to pub/docs/dev/hoon/library/2dc.md diff --git a/pub/doc/hoon/library/2dd.md b/pub/docs/dev/hoon/library/2dd.md similarity index 100% rename from pub/doc/hoon/library/2dd.md rename to pub/docs/dev/hoon/library/2dd.md diff --git a/pub/doc/hoon/library/2ea.md b/pub/docs/dev/hoon/library/2ea.md similarity index 100% rename from pub/doc/hoon/library/2ea.md rename to pub/docs/dev/hoon/library/2ea.md diff --git a/pub/doc/hoon/library/2eb.md b/pub/docs/dev/hoon/library/2eb.md similarity index 100% rename from pub/doc/hoon/library/2eb.md rename to pub/docs/dev/hoon/library/2eb.md diff --git a/pub/doc/hoon/library/2ec.md b/pub/docs/dev/hoon/library/2ec.md similarity index 100% rename from pub/doc/hoon/library/2ec.md rename to pub/docs/dev/hoon/library/2ec.md diff --git a/pub/doc/hoon/library/2ed.md b/pub/docs/dev/hoon/library/2ed.md similarity index 100% rename from pub/doc/hoon/library/2ed.md rename to pub/docs/dev/hoon/library/2ed.md diff --git a/pub/doc/hoon/library/2ee.md b/pub/docs/dev/hoon/library/2ee.md similarity index 100% rename from pub/doc/hoon/library/2ee.md rename to pub/docs/dev/hoon/library/2ee.md diff --git a/pub/doc/hoon/library/2ef.md b/pub/docs/dev/hoon/library/2ef.md similarity index 100% rename from pub/doc/hoon/library/2ef.md rename to pub/docs/dev/hoon/library/2ef.md diff --git a/pub/doc/hoon/library/2eg.md b/pub/docs/dev/hoon/library/2eg.md similarity index 100% rename from pub/doc/hoon/library/2eg.md rename to pub/docs/dev/hoon/library/2eg.md diff --git a/pub/doc/hoon/library/2eh.md b/pub/docs/dev/hoon/library/2eh.md similarity index 100% rename from pub/doc/hoon/library/2eh.md rename to pub/docs/dev/hoon/library/2eh.md diff --git a/pub/doc/hoon/library/2ei.md b/pub/docs/dev/hoon/library/2ei.md similarity index 100% rename from pub/doc/hoon/library/2ei.md rename to pub/docs/dev/hoon/library/2ei.md diff --git a/pub/doc/hoon/library/2ej.md b/pub/docs/dev/hoon/library/2ej.md similarity index 100% rename from pub/doc/hoon/library/2ej.md rename to 
pub/docs/dev/hoon/library/2ej.md diff --git a/pub/doc/hoon/library/2ek.md b/pub/docs/dev/hoon/library/2ek.md similarity index 100% rename from pub/doc/hoon/library/2ek.md rename to pub/docs/dev/hoon/library/2ek.md diff --git a/pub/doc/hoon/library/2el.md b/pub/docs/dev/hoon/library/2el.md similarity index 100% rename from pub/doc/hoon/library/2el.md rename to pub/docs/dev/hoon/library/2el.md diff --git a/pub/doc/hoon/library/2em.md b/pub/docs/dev/hoon/library/2em.md similarity index 100% rename from pub/doc/hoon/library/2em.md rename to pub/docs/dev/hoon/library/2em.md diff --git a/pub/doc/hoon/library/2en.md b/pub/docs/dev/hoon/library/2en.md similarity index 100% rename from pub/doc/hoon/library/2en.md rename to pub/docs/dev/hoon/library/2en.md diff --git a/pub/doc/hoon/library/2eo.md b/pub/docs/dev/hoon/library/2eo.md similarity index 100% rename from pub/doc/hoon/library/2eo.md rename to pub/docs/dev/hoon/library/2eo.md diff --git a/pub/doc/hoon/library/2ep.md b/pub/docs/dev/hoon/library/2ep.md similarity index 100% rename from pub/doc/hoon/library/2ep.md rename to pub/docs/dev/hoon/library/2ep.md diff --git a/pub/doc/hoon/library/2ew.md b/pub/docs/dev/hoon/library/2ew.md similarity index 100% rename from pub/doc/hoon/library/2ew.md rename to pub/docs/dev/hoon/library/2ew.md diff --git a/pub/doc/hoon/library/2ex.md b/pub/docs/dev/hoon/library/2ex.md similarity index 100% rename from pub/doc/hoon/library/2ex.md rename to pub/docs/dev/hoon/library/2ex.md diff --git a/pub/doc/hoon/library/2ey.md b/pub/docs/dev/hoon/library/2ey.md similarity index 100% rename from pub/doc/hoon/library/2ey.md rename to pub/docs/dev/hoon/library/2ey.md diff --git a/pub/doc/hoon/library/2ez.md b/pub/docs/dev/hoon/library/2ez.md similarity index 100% rename from pub/doc/hoon/library/2ez.md rename to pub/docs/dev/hoon/library/2ez.md diff --git a/pub/doc/hoon/library/3ba.md b/pub/docs/dev/hoon/library/3ba.md similarity index 100% rename from pub/doc/hoon/library/3ba.md rename to pub/docs/dev/hoon/library/3ba.md diff --git a/pub/doc/hoon/library/3bb.md b/pub/docs/dev/hoon/library/3bb.md similarity index 100% rename from pub/doc/hoon/library/3bb.md rename to pub/docs/dev/hoon/library/3bb.md diff --git a/pub/doc/hoon/library/3bc.md b/pub/docs/dev/hoon/library/3bc.md similarity index 100% rename from pub/doc/hoon/library/3bc.md rename to pub/docs/dev/hoon/library/3bc.md diff --git a/pub/doc/hoon/library/3bd.md b/pub/docs/dev/hoon/library/3bd.md similarity index 100% rename from pub/doc/hoon/library/3bd.md rename to pub/docs/dev/hoon/library/3bd.md diff --git a/pub/doc/hoon/library/3be.md b/pub/docs/dev/hoon/library/3be.md similarity index 100% rename from pub/doc/hoon/library/3be.md rename to pub/docs/dev/hoon/library/3be.md diff --git a/pub/doc/hoon/library/3bf.md b/pub/docs/dev/hoon/library/3bf.md similarity index 100% rename from pub/doc/hoon/library/3bf.md rename to pub/docs/dev/hoon/library/3bf.md diff --git a/pub/doc/hoon/library/3bg.md b/pub/docs/dev/hoon/library/3bg.md similarity index 100% rename from pub/doc/hoon/library/3bg.md rename to pub/docs/dev/hoon/library/3bg.md diff --git a/pub/doc/hoon/library/3bh.md b/pub/docs/dev/hoon/library/3bh.md similarity index 100% rename from pub/doc/hoon/library/3bh.md rename to pub/docs/dev/hoon/library/3bh.md diff --git a/pub/doc/hoon/library/3bi.md b/pub/docs/dev/hoon/library/3bi.md similarity index 100% rename from pub/doc/hoon/library/3bi.md rename to pub/docs/dev/hoon/library/3bi.md diff --git a/pub/doc/hoon/reference.md b/pub/docs/dev/hoon/reference.md similarity 
index 100% rename from pub/doc/hoon/reference.md rename to pub/docs/dev/hoon/reference.md diff --git a/pub/doc/hoon/reference/odors.md b/pub/docs/dev/hoon/reference/odors.md similarity index 100% rename from pub/doc/hoon/reference/odors.md rename to pub/docs/dev/hoon/reference/odors.md diff --git a/pub/doc/hoon/reference/pronunciation.md b/pub/docs/dev/hoon/reference/pronunciation.md similarity index 100% rename from pub/doc/hoon/reference/pronunciation.md rename to pub/docs/dev/hoon/reference/pronunciation.md diff --git a/pub/doc/hoon/runes.md b/pub/docs/dev/hoon/runes.md similarity index 100% rename from pub/doc/hoon/runes.md rename to pub/docs/dev/hoon/runes.md diff --git a/pub/doc/hoon/runes/bc.md b/pub/docs/dev/hoon/runes/bc.md similarity index 100% rename from pub/doc/hoon/runes/bc.md rename to pub/docs/dev/hoon/runes/bc.md diff --git a/pub/doc/hoon/runes/bc/bcbr.md b/pub/docs/dev/hoon/runes/bc/bcbr.md similarity index 100% rename from pub/doc/hoon/runes/bc/bcbr.md rename to pub/docs/dev/hoon/runes/bc/bcbr.md diff --git a/pub/doc/hoon/runes/bc/bccb.md b/pub/docs/dev/hoon/runes/bc/bccb.md similarity index 100% rename from pub/doc/hoon/runes/bc/bccb.md rename to pub/docs/dev/hoon/runes/bc/bccb.md diff --git a/pub/doc/hoon/runes/bc/bccl.md b/pub/docs/dev/hoon/runes/bc/bccl.md similarity index 100% rename from pub/doc/hoon/runes/bc/bccl.md rename to pub/docs/dev/hoon/runes/bc/bccl.md diff --git a/pub/doc/hoon/runes/bc/bccm.md b/pub/docs/dev/hoon/runes/bc/bccm.md similarity index 100% rename from pub/doc/hoon/runes/bc/bccm.md rename to pub/docs/dev/hoon/runes/bc/bccm.md diff --git a/pub/doc/hoon/runes/bc/bccn.md b/pub/docs/dev/hoon/runes/bc/bccn.md similarity index 100% rename from pub/doc/hoon/runes/bc/bccn.md rename to pub/docs/dev/hoon/runes/bc/bccn.md diff --git a/pub/doc/hoon/runes/bc/bckt.md b/pub/docs/dev/hoon/runes/bc/bckt.md similarity index 100% rename from pub/doc/hoon/runes/bc/bckt.md rename to pub/docs/dev/hoon/runes/bc/bckt.md diff --git a/pub/doc/hoon/runes/bc/bcls.md b/pub/docs/dev/hoon/runes/bc/bcls.md similarity index 100% rename from pub/doc/hoon/runes/bc/bcls.md rename to pub/docs/dev/hoon/runes/bc/bcls.md diff --git a/pub/doc/hoon/runes/bc/bcpm.md b/pub/docs/dev/hoon/runes/bc/bcpm.md similarity index 100% rename from pub/doc/hoon/runes/bc/bcpm.md rename to pub/docs/dev/hoon/runes/bc/bcpm.md diff --git a/pub/doc/hoon/runes/bc/bcpt.md b/pub/docs/dev/hoon/runes/bc/bcpt.md similarity index 100% rename from pub/doc/hoon/runes/bc/bcpt.md rename to pub/docs/dev/hoon/runes/bc/bcpt.md diff --git a/pub/doc/hoon/runes/bc/bctr.md b/pub/docs/dev/hoon/runes/bc/bctr.md similarity index 100% rename from pub/doc/hoon/runes/bc/bctr.md rename to pub/docs/dev/hoon/runes/bc/bctr.md diff --git a/pub/doc/hoon/runes/bc/bcts.md b/pub/docs/dev/hoon/runes/bc/bcts.md similarity index 100% rename from pub/doc/hoon/runes/bc/bcts.md rename to pub/docs/dev/hoon/runes/bc/bcts.md diff --git a/pub/doc/hoon/runes/bc/bcwt.md b/pub/docs/dev/hoon/runes/bc/bcwt.md similarity index 100% rename from pub/doc/hoon/runes/bc/bcwt.md rename to pub/docs/dev/hoon/runes/bc/bcwt.md diff --git a/pub/doc/hoon/runes/bc/bczp.md b/pub/docs/dev/hoon/runes/bc/bczp.md similarity index 100% rename from pub/doc/hoon/runes/bc/bczp.md rename to pub/docs/dev/hoon/runes/bc/bczp.md diff --git a/pub/doc/hoon/runes/br.md b/pub/docs/dev/hoon/runes/br.md similarity index 100% rename from pub/doc/hoon/runes/br.md rename to pub/docs/dev/hoon/runes/br.md diff --git a/pub/doc/hoon/runes/br/brcb.md b/pub/docs/dev/hoon/runes/br/brcb.md 
similarity index 100% rename from pub/doc/hoon/runes/br/brcb.md rename to pub/docs/dev/hoon/runes/br/brcb.md diff --git a/pub/doc/hoon/runes/br/brcn.md b/pub/docs/dev/hoon/runes/br/brcn.md similarity index 100% rename from pub/doc/hoon/runes/br/brcn.md rename to pub/docs/dev/hoon/runes/br/brcn.md diff --git a/pub/doc/hoon/runes/br/brdt.md b/pub/docs/dev/hoon/runes/br/brdt.md similarity index 100% rename from pub/doc/hoon/runes/br/brdt.md rename to pub/docs/dev/hoon/runes/br/brdt.md diff --git a/pub/doc/hoon/runes/br/brfs.md b/pub/docs/dev/hoon/runes/br/brfs.md similarity index 100% rename from pub/doc/hoon/runes/br/brfs.md rename to pub/docs/dev/hoon/runes/br/brfs.md diff --git a/pub/doc/hoon/runes/br/brhp.md b/pub/docs/dev/hoon/runes/br/brhp.md similarity index 100% rename from pub/doc/hoon/runes/br/brhp.md rename to pub/docs/dev/hoon/runes/br/brhp.md diff --git a/pub/doc/hoon/runes/br/brkt.md b/pub/docs/dev/hoon/runes/br/brkt.md similarity index 100% rename from pub/doc/hoon/runes/br/brkt.md rename to pub/docs/dev/hoon/runes/br/brkt.md diff --git a/pub/doc/hoon/runes/br/brls.md b/pub/docs/dev/hoon/runes/br/brls.md similarity index 100% rename from pub/doc/hoon/runes/br/brls.md rename to pub/docs/dev/hoon/runes/br/brls.md diff --git a/pub/doc/hoon/runes/br/brtr.md b/pub/docs/dev/hoon/runes/br/brtr.md similarity index 100% rename from pub/doc/hoon/runes/br/brtr.md rename to pub/docs/dev/hoon/runes/br/brtr.md diff --git a/pub/doc/hoon/runes/br/brts.md b/pub/docs/dev/hoon/runes/br/brts.md similarity index 100% rename from pub/doc/hoon/runes/br/brts.md rename to pub/docs/dev/hoon/runes/br/brts.md diff --git a/pub/doc/hoon/runes/br/brwt.md b/pub/docs/dev/hoon/runes/br/brwt.md similarity index 100% rename from pub/doc/hoon/runes/br/brwt.md rename to pub/docs/dev/hoon/runes/br/brwt.md diff --git a/pub/doc/hoon/runes/cl.md b/pub/docs/dev/hoon/runes/cl.md similarity index 100% rename from pub/doc/hoon/runes/cl.md rename to pub/docs/dev/hoon/runes/cl.md diff --git a/pub/doc/hoon/runes/cl/clcb.md b/pub/docs/dev/hoon/runes/cl/clcb.md similarity index 100% rename from pub/doc/hoon/runes/cl/clcb.md rename to pub/docs/dev/hoon/runes/cl/clcb.md diff --git a/pub/doc/hoon/runes/cl/clfs.md b/pub/docs/dev/hoon/runes/cl/clfs.md similarity index 100% rename from pub/doc/hoon/runes/cl/clfs.md rename to pub/docs/dev/hoon/runes/cl/clfs.md diff --git a/pub/doc/hoon/runes/cl/clhp.md b/pub/docs/dev/hoon/runes/cl/clhp.md similarity index 100% rename from pub/doc/hoon/runes/cl/clhp.md rename to pub/docs/dev/hoon/runes/cl/clhp.md diff --git a/pub/doc/hoon/runes/cl/clkt.md b/pub/docs/dev/hoon/runes/cl/clkt.md similarity index 100% rename from pub/doc/hoon/runes/cl/clkt.md rename to pub/docs/dev/hoon/runes/cl/clkt.md diff --git a/pub/doc/hoon/runes/cl/clls.md b/pub/docs/dev/hoon/runes/cl/clls.md similarity index 100% rename from pub/doc/hoon/runes/cl/clls.md rename to pub/docs/dev/hoon/runes/cl/clls.md diff --git a/pub/doc/hoon/runes/cl/clsg.md b/pub/docs/dev/hoon/runes/cl/clsg.md similarity index 100% rename from pub/doc/hoon/runes/cl/clsg.md rename to pub/docs/dev/hoon/runes/cl/clsg.md diff --git a/pub/doc/hoon/runes/cl/cltr.md b/pub/docs/dev/hoon/runes/cl/cltr.md similarity index 100% rename from pub/doc/hoon/runes/cl/cltr.md rename to pub/docs/dev/hoon/runes/cl/cltr.md diff --git a/pub/doc/hoon/runes/cn.md b/pub/docs/dev/hoon/runes/cn.md similarity index 100% rename from pub/doc/hoon/runes/cn.md rename to pub/docs/dev/hoon/runes/cn.md diff --git a/pub/doc/hoon/runes/cn/cncb.md b/pub/docs/dev/hoon/runes/cn/cncb.md 
similarity index 100% rename from pub/doc/hoon/runes/cn/cncb.md rename to pub/docs/dev/hoon/runes/cn/cncb.md diff --git a/pub/doc/hoon/runes/cn/cncl.md b/pub/docs/dev/hoon/runes/cn/cncl.md similarity index 100% rename from pub/doc/hoon/runes/cn/cncl.md rename to pub/docs/dev/hoon/runes/cn/cncl.md diff --git a/pub/doc/hoon/runes/cn/cndt.md b/pub/docs/dev/hoon/runes/cn/cndt.md similarity index 100% rename from pub/doc/hoon/runes/cn/cndt.md rename to pub/docs/dev/hoon/runes/cn/cndt.md diff --git a/pub/doc/hoon/runes/cn/cnhp.md b/pub/docs/dev/hoon/runes/cn/cnhp.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnhp.md rename to pub/docs/dev/hoon/runes/cn/cnhp.md diff --git a/pub/doc/hoon/runes/cn/cnkt.md b/pub/docs/dev/hoon/runes/cn/cnkt.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnkt.md rename to pub/docs/dev/hoon/runes/cn/cnkt.md diff --git a/pub/doc/hoon/runes/cn/cnls.md b/pub/docs/dev/hoon/runes/cn/cnls.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnls.md rename to pub/docs/dev/hoon/runes/cn/cnls.md diff --git a/pub/doc/hoon/runes/cn/cnsg.md b/pub/docs/dev/hoon/runes/cn/cnsg.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnsg.md rename to pub/docs/dev/hoon/runes/cn/cnsg.md diff --git a/pub/doc/hoon/runes/cn/cntr.md b/pub/docs/dev/hoon/runes/cn/cntr.md similarity index 100% rename from pub/doc/hoon/runes/cn/cntr.md rename to pub/docs/dev/hoon/runes/cn/cntr.md diff --git a/pub/doc/hoon/runes/cn/cnts.md b/pub/docs/dev/hoon/runes/cn/cnts.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnts.md rename to pub/docs/dev/hoon/runes/cn/cnts.md diff --git a/pub/doc/hoon/runes/cn/cnzy.md b/pub/docs/dev/hoon/runes/cn/cnzy.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnzy.md rename to pub/docs/dev/hoon/runes/cn/cnzy.md diff --git a/pub/doc/hoon/runes/cn/cnzz.md b/pub/docs/dev/hoon/runes/cn/cnzz.md similarity index 100% rename from pub/doc/hoon/runes/cn/cnzz.md rename to pub/docs/dev/hoon/runes/cn/cnzz.md diff --git a/pub/doc/hoon/runes/dt.md b/pub/docs/dev/hoon/runes/dt.md similarity index 100% rename from pub/doc/hoon/runes/dt.md rename to pub/docs/dev/hoon/runes/dt.md diff --git a/pub/doc/hoon/runes/dt/dtkt.md b/pub/docs/dev/hoon/runes/dt/dtkt.md similarity index 100% rename from pub/doc/hoon/runes/dt/dtkt.md rename to pub/docs/dev/hoon/runes/dt/dtkt.md diff --git a/pub/doc/hoon/runes/dt/dtls.md b/pub/docs/dev/hoon/runes/dt/dtls.md similarity index 100% rename from pub/doc/hoon/runes/dt/dtls.md rename to pub/docs/dev/hoon/runes/dt/dtls.md diff --git a/pub/doc/hoon/runes/dt/dttr.md b/pub/docs/dev/hoon/runes/dt/dttr.md similarity index 100% rename from pub/doc/hoon/runes/dt/dttr.md rename to pub/docs/dev/hoon/runes/dt/dttr.md diff --git a/pub/doc/hoon/runes/dt/dtts.md b/pub/docs/dev/hoon/runes/dt/dtts.md similarity index 100% rename from pub/doc/hoon/runes/dt/dtts.md rename to pub/docs/dev/hoon/runes/dt/dtts.md diff --git a/pub/doc/hoon/runes/dt/dtwt.md b/pub/docs/dev/hoon/runes/dt/dtwt.md similarity index 100% rename from pub/doc/hoon/runes/dt/dtwt.md rename to pub/docs/dev/hoon/runes/dt/dtwt.md diff --git a/pub/doc/hoon/runes/dt/dtzy.md b/pub/docs/dev/hoon/runes/dt/dtzy.md similarity index 100% rename from pub/doc/hoon/runes/dt/dtzy.md rename to pub/docs/dev/hoon/runes/dt/dtzy.md diff --git a/pub/doc/hoon/runes/dt/dtzz.md b/pub/docs/dev/hoon/runes/dt/dtzz.md similarity index 100% rename from pub/doc/hoon/runes/dt/dtzz.md rename to pub/docs/dev/hoon/runes/dt/dtzz.md diff --git a/pub/doc/hoon/runes/hx.md 
b/pub/docs/dev/hoon/runes/hx.md similarity index 100% rename from pub/doc/hoon/runes/hx.md rename to pub/docs/dev/hoon/runes/hx.md diff --git a/pub/doc/hoon/runes/hx/hxgl.md b/pub/docs/dev/hoon/runes/hx/hxgl.md similarity index 100% rename from pub/doc/hoon/runes/hx/hxgl.md rename to pub/docs/dev/hoon/runes/hx/hxgl.md diff --git a/pub/doc/hoon/runes/hx/hxgr.md b/pub/docs/dev/hoon/runes/hx/hxgr.md similarity index 100% rename from pub/doc/hoon/runes/hx/hxgr.md rename to pub/docs/dev/hoon/runes/hx/hxgr.md diff --git a/pub/doc/hoon/runes/index.hook b/pub/docs/dev/hoon/runes/index.hook similarity index 100% rename from pub/doc/hoon/runes/index.hook rename to pub/docs/dev/hoon/runes/index.hook diff --git a/pub/doc/hoon/runes/kt.md b/pub/docs/dev/hoon/runes/kt.md similarity index 100% rename from pub/doc/hoon/runes/kt.md rename to pub/docs/dev/hoon/runes/kt.md diff --git a/pub/doc/hoon/runes/kt/ktbr.md b/pub/docs/dev/hoon/runes/kt/ktbr.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktbr.md rename to pub/docs/dev/hoon/runes/kt/ktbr.md diff --git a/pub/doc/hoon/runes/kt/ktdt.md b/pub/docs/dev/hoon/runes/kt/ktdt.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktdt.md rename to pub/docs/dev/hoon/runes/kt/ktdt.md diff --git a/pub/doc/hoon/runes/kt/kthp.md b/pub/docs/dev/hoon/runes/kt/kthp.md similarity index 100% rename from pub/doc/hoon/runes/kt/kthp.md rename to pub/docs/dev/hoon/runes/kt/kthp.md diff --git a/pub/doc/hoon/runes/kt/ktls.md b/pub/docs/dev/hoon/runes/kt/ktls.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktls.md rename to pub/docs/dev/hoon/runes/kt/ktls.md diff --git a/pub/doc/hoon/runes/kt/ktpm.md b/pub/docs/dev/hoon/runes/kt/ktpm.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktpm.md rename to pub/docs/dev/hoon/runes/kt/ktpm.md diff --git a/pub/doc/hoon/runes/kt/ktsg.md b/pub/docs/dev/hoon/runes/kt/ktsg.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktsg.md rename to pub/docs/dev/hoon/runes/kt/ktsg.md diff --git a/pub/doc/hoon/runes/kt/ktts.md b/pub/docs/dev/hoon/runes/kt/ktts.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktts.md rename to pub/docs/dev/hoon/runes/kt/ktts.md diff --git a/pub/doc/hoon/runes/kt/ktwt.md b/pub/docs/dev/hoon/runes/kt/ktwt.md similarity index 100% rename from pub/doc/hoon/runes/kt/ktwt.md rename to pub/docs/dev/hoon/runes/kt/ktwt.md diff --git a/pub/doc/hoon/runes/sg.md b/pub/docs/dev/hoon/runes/sg.md similarity index 100% rename from pub/doc/hoon/runes/sg.md rename to pub/docs/dev/hoon/runes/sg.md diff --git a/pub/doc/hoon/runes/sg/sgbc.md b/pub/docs/dev/hoon/runes/sg/sgbc.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgbc.md rename to pub/docs/dev/hoon/runes/sg/sgbc.md diff --git a/pub/doc/hoon/runes/sg/sgbr.md b/pub/docs/dev/hoon/runes/sg/sgbr.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgbr.md rename to pub/docs/dev/hoon/runes/sg/sgbr.md diff --git a/pub/doc/hoon/runes/sg/sgcb.md b/pub/docs/dev/hoon/runes/sg/sgcb.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgcb.md rename to pub/docs/dev/hoon/runes/sg/sgcb.md diff --git a/pub/doc/hoon/runes/sg/sgcn.md b/pub/docs/dev/hoon/runes/sg/sgcn.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgcn.md rename to pub/docs/dev/hoon/runes/sg/sgcn.md diff --git a/pub/doc/hoon/runes/sg/sgfs.md b/pub/docs/dev/hoon/runes/sg/sgfs.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgfs.md rename to pub/docs/dev/hoon/runes/sg/sgfs.md diff --git a/pub/doc/hoon/runes/sg/sggl.md 
b/pub/docs/dev/hoon/runes/sg/sggl.md similarity index 100% rename from pub/doc/hoon/runes/sg/sggl.md rename to pub/docs/dev/hoon/runes/sg/sggl.md diff --git a/pub/doc/hoon/runes/sg/sggr.md b/pub/docs/dev/hoon/runes/sg/sggr.md similarity index 100% rename from pub/doc/hoon/runes/sg/sggr.md rename to pub/docs/dev/hoon/runes/sg/sggr.md diff --git a/pub/doc/hoon/runes/sg/sgls.md b/pub/docs/dev/hoon/runes/sg/sgls.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgls.md rename to pub/docs/dev/hoon/runes/sg/sgls.md diff --git a/pub/doc/hoon/runes/sg/sgpm.md b/pub/docs/dev/hoon/runes/sg/sgpm.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgpm.md rename to pub/docs/dev/hoon/runes/sg/sgpm.md diff --git a/pub/doc/hoon/runes/sg/sgts.md b/pub/docs/dev/hoon/runes/sg/sgts.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgts.md rename to pub/docs/dev/hoon/runes/sg/sgts.md diff --git a/pub/doc/hoon/runes/sg/sgwt.md b/pub/docs/dev/hoon/runes/sg/sgwt.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgwt.md rename to pub/docs/dev/hoon/runes/sg/sgwt.md diff --git a/pub/doc/hoon/runes/sg/sgzp.md b/pub/docs/dev/hoon/runes/sg/sgzp.md similarity index 100% rename from pub/doc/hoon/runes/sg/sgzp.md rename to pub/docs/dev/hoon/runes/sg/sgzp.md diff --git a/pub/doc/hoon/runes/sm.md b/pub/docs/dev/hoon/runes/sm.md similarity index 100% rename from pub/doc/hoon/runes/sm.md rename to pub/docs/dev/hoon/runes/sm.md diff --git a/pub/doc/hoon/runes/sm/smcl.md b/pub/docs/dev/hoon/runes/sm/smcl.md similarity index 100% rename from pub/doc/hoon/runes/sm/smcl.md rename to pub/docs/dev/hoon/runes/sm/smcl.md diff --git a/pub/doc/hoon/runes/sm/smcn.md b/pub/docs/dev/hoon/runes/sm/smcn.md similarity index 100% rename from pub/doc/hoon/runes/sm/smcn.md rename to pub/docs/dev/hoon/runes/sm/smcn.md diff --git a/pub/doc/hoon/runes/sm/smdq.md b/pub/docs/dev/hoon/runes/sm/smdq.md similarity index 100% rename from pub/doc/hoon/runes/sm/smdq.md rename to pub/docs/dev/hoon/runes/sm/smdq.md diff --git a/pub/doc/hoon/runes/sm/smhp.md b/pub/docs/dev/hoon/runes/sm/smhp.md similarity index 100% rename from pub/doc/hoon/runes/sm/smhp.md rename to pub/docs/dev/hoon/runes/sm/smhp.md diff --git a/pub/doc/hoon/runes/sm/smls.md b/pub/docs/dev/hoon/runes/sm/smls.md similarity index 100% rename from pub/doc/hoon/runes/sm/smls.md rename to pub/docs/dev/hoon/runes/sm/smls.md diff --git a/pub/doc/hoon/runes/sm/smsg.md b/pub/docs/dev/hoon/runes/sm/smsg.md similarity index 100% rename from pub/doc/hoon/runes/sm/smsg.md rename to pub/docs/dev/hoon/runes/sm/smsg.md diff --git a/pub/doc/hoon/runes/sm/smsm.md b/pub/docs/dev/hoon/runes/sm/smsm.md similarity index 100% rename from pub/doc/hoon/runes/sm/smsm.md rename to pub/docs/dev/hoon/runes/sm/smsm.md diff --git a/pub/doc/hoon/runes/sm/smtr.md b/pub/docs/dev/hoon/runes/sm/smtr.md similarity index 100% rename from pub/doc/hoon/runes/sm/smtr.md rename to pub/docs/dev/hoon/runes/sm/smtr.md diff --git a/pub/doc/hoon/runes/sm/smzz.md b/pub/docs/dev/hoon/runes/sm/smzz.md similarity index 100% rename from pub/doc/hoon/runes/sm/smzz.md rename to pub/docs/dev/hoon/runes/sm/smzz.md diff --git a/pub/doc/hoon/runes/ts.md b/pub/docs/dev/hoon/runes/ts.md similarity index 100% rename from pub/doc/hoon/runes/ts.md rename to pub/docs/dev/hoon/runes/ts.md diff --git a/pub/doc/hoon/runes/ts/tsbr.md b/pub/docs/dev/hoon/runes/ts/tsbr.md similarity index 100% rename from pub/doc/hoon/runes/ts/tsbr.md rename to pub/docs/dev/hoon/runes/ts/tsbr.md diff --git a/pub/doc/hoon/runes/ts/tscl.md 
b/pub/docs/dev/hoon/runes/ts/tscl.md similarity index 100% rename from pub/doc/hoon/runes/ts/tscl.md rename to pub/docs/dev/hoon/runes/ts/tscl.md diff --git a/pub/doc/hoon/runes/ts/tsdt.md b/pub/docs/dev/hoon/runes/ts/tsdt.md similarity index 100% rename from pub/doc/hoon/runes/ts/tsdt.md rename to pub/docs/dev/hoon/runes/ts/tsdt.md diff --git a/pub/doc/hoon/runes/ts/tsgl.md b/pub/docs/dev/hoon/runes/ts/tsgl.md similarity index 100% rename from pub/doc/hoon/runes/ts/tsgl.md rename to pub/docs/dev/hoon/runes/ts/tsgl.md diff --git a/pub/doc/hoon/runes/ts/tsgr.md b/pub/docs/dev/hoon/runes/ts/tsgr.md similarity index 100% rename from pub/doc/hoon/runes/ts/tsgr.md rename to pub/docs/dev/hoon/runes/ts/tsgr.md diff --git a/pub/doc/hoon/runes/ts/tshp.md b/pub/docs/dev/hoon/runes/ts/tshp.md similarity index 100% rename from pub/doc/hoon/runes/ts/tshp.md rename to pub/docs/dev/hoon/runes/ts/tshp.md diff --git a/pub/doc/hoon/runes/ts/tskt.md b/pub/docs/dev/hoon/runes/ts/tskt.md similarity index 100% rename from pub/doc/hoon/runes/ts/tskt.md rename to pub/docs/dev/hoon/runes/ts/tskt.md diff --git a/pub/doc/hoon/runes/ts/tsls.md b/pub/docs/dev/hoon/runes/ts/tsls.md similarity index 100% rename from pub/doc/hoon/runes/ts/tsls.md rename to pub/docs/dev/hoon/runes/ts/tsls.md diff --git a/pub/doc/hoon/runes/ts/tssg.md b/pub/docs/dev/hoon/runes/ts/tssg.md similarity index 100% rename from pub/doc/hoon/runes/ts/tssg.md rename to pub/docs/dev/hoon/runes/ts/tssg.md diff --git a/pub/doc/hoon/runes/ts/tstr.md b/pub/docs/dev/hoon/runes/ts/tstr.md similarity index 100% rename from pub/doc/hoon/runes/ts/tstr.md rename to pub/docs/dev/hoon/runes/ts/tstr.md diff --git a/pub/doc/hoon/runes/wt.md b/pub/docs/dev/hoon/runes/wt.md similarity index 100% rename from pub/doc/hoon/runes/wt.md rename to pub/docs/dev/hoon/runes/wt.md diff --git a/pub/doc/hoon/runes/wt/wtbr.md b/pub/docs/dev/hoon/runes/wt/wtbr.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtbr.md rename to pub/docs/dev/hoon/runes/wt/wtbr.md diff --git a/pub/doc/hoon/runes/wt/wtcl.md b/pub/docs/dev/hoon/runes/wt/wtcl.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtcl.md rename to pub/docs/dev/hoon/runes/wt/wtcl.md diff --git a/pub/doc/hoon/runes/wt/wtdt.md b/pub/docs/dev/hoon/runes/wt/wtdt.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtdt.md rename to pub/docs/dev/hoon/runes/wt/wtdt.md diff --git a/pub/doc/hoon/runes/wt/wtgl.md b/pub/docs/dev/hoon/runes/wt/wtgl.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtgl.md rename to pub/docs/dev/hoon/runes/wt/wtgl.md diff --git a/pub/doc/hoon/runes/wt/wtgr.md b/pub/docs/dev/hoon/runes/wt/wtgr.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtgr.md rename to pub/docs/dev/hoon/runes/wt/wtgr.md diff --git a/pub/doc/hoon/runes/wt/wthp.md b/pub/docs/dev/hoon/runes/wt/wthp.md similarity index 100% rename from pub/doc/hoon/runes/wt/wthp.md rename to pub/docs/dev/hoon/runes/wt/wthp.md diff --git a/pub/doc/hoon/runes/wt/wthz.md b/pub/docs/dev/hoon/runes/wt/wthz.md similarity index 100% rename from pub/doc/hoon/runes/wt/wthz.md rename to pub/docs/dev/hoon/runes/wt/wthz.md diff --git a/pub/doc/hoon/runes/wt/wtkt.md b/pub/docs/dev/hoon/runes/wt/wtkt.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtkt.md rename to pub/docs/dev/hoon/runes/wt/wtkt.md diff --git a/pub/doc/hoon/runes/wt/wtkz.md b/pub/docs/dev/hoon/runes/wt/wtkz.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtkz.md rename to pub/docs/dev/hoon/runes/wt/wtkz.md diff --git 
a/pub/doc/hoon/runes/wt/wtls.md b/pub/docs/dev/hoon/runes/wt/wtls.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtls.md rename to pub/docs/dev/hoon/runes/wt/wtls.md diff --git a/pub/doc/hoon/runes/wt/wtlz.md b/pub/docs/dev/hoon/runes/wt/wtlz.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtlz.md rename to pub/docs/dev/hoon/runes/wt/wtlz.md diff --git a/pub/doc/hoon/runes/wt/wtpm.md b/pub/docs/dev/hoon/runes/wt/wtpm.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtpm.md rename to pub/docs/dev/hoon/runes/wt/wtpm.md diff --git a/pub/doc/hoon/runes/wt/wtpt.md b/pub/docs/dev/hoon/runes/wt/wtpt.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtpt.md rename to pub/docs/dev/hoon/runes/wt/wtpt.md diff --git a/pub/doc/hoon/runes/wt/wtpz.md b/pub/docs/dev/hoon/runes/wt/wtpz.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtpz.md rename to pub/docs/dev/hoon/runes/wt/wtpz.md diff --git a/pub/doc/hoon/runes/wt/wtsg.md b/pub/docs/dev/hoon/runes/wt/wtsg.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtsg.md rename to pub/docs/dev/hoon/runes/wt/wtsg.md diff --git a/pub/doc/hoon/runes/wt/wtsz.md b/pub/docs/dev/hoon/runes/wt/wtsz.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtsz.md rename to pub/docs/dev/hoon/runes/wt/wtsz.md diff --git a/pub/doc/hoon/runes/wt/wtts.md b/pub/docs/dev/hoon/runes/wt/wtts.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtts.md rename to pub/docs/dev/hoon/runes/wt/wtts.md diff --git a/pub/doc/hoon/runes/wt/wttz.md b/pub/docs/dev/hoon/runes/wt/wttz.md similarity index 100% rename from pub/doc/hoon/runes/wt/wttz.md rename to pub/docs/dev/hoon/runes/wt/wttz.md diff --git a/pub/doc/hoon/runes/wt/wtzp.md b/pub/docs/dev/hoon/runes/wt/wtzp.md similarity index 100% rename from pub/doc/hoon/runes/wt/wtzp.md rename to pub/docs/dev/hoon/runes/wt/wtzp.md diff --git a/pub/doc/hoon/runes/zp.md b/pub/docs/dev/hoon/runes/zp.md similarity index 100% rename from pub/doc/hoon/runes/zp.md rename to pub/docs/dev/hoon/runes/zp.md diff --git a/pub/doc/hoon/runes/zp/zpcb.md b/pub/docs/dev/hoon/runes/zp/zpcb.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpcb.md rename to pub/docs/dev/hoon/runes/zp/zpcb.md diff --git a/pub/doc/hoon/runes/zp/zpcm.md b/pub/docs/dev/hoon/runes/zp/zpcm.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpcm.md rename to pub/docs/dev/hoon/runes/zp/zpcm.md diff --git a/pub/doc/hoon/runes/zp/zpfs.md b/pub/docs/dev/hoon/runes/zp/zpfs.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpfs.md rename to pub/docs/dev/hoon/runes/zp/zpfs.md diff --git a/pub/doc/hoon/runes/zp/zpgr.md b/pub/docs/dev/hoon/runes/zp/zpgr.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpgr.md rename to pub/docs/dev/hoon/runes/zp/zpgr.md diff --git a/pub/doc/hoon/runes/zp/zpsm.md b/pub/docs/dev/hoon/runes/zp/zpsm.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpsm.md rename to pub/docs/dev/hoon/runes/zp/zpsm.md diff --git a/pub/doc/hoon/runes/zp/zpts.md b/pub/docs/dev/hoon/runes/zp/zpts.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpts.md rename to pub/docs/dev/hoon/runes/zp/zpts.md diff --git a/pub/doc/hoon/runes/zp/zpwt.md b/pub/docs/dev/hoon/runes/zp/zpwt.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpwt.md rename to pub/docs/dev/hoon/runes/zp/zpwt.md diff --git a/pub/doc/hoon/runes/zp/zpzp.md b/pub/docs/dev/hoon/runes/zp/zpzp.md similarity index 100% rename from pub/doc/hoon/runes/zp/zpzp.md rename to 
pub/docs/dev/hoon/runes/zp/zpzp.md diff --git a/pub/doc/hoon/tutorial.md b/pub/docs/dev/hoon/tutorial.md similarity index 100% rename from pub/doc/hoon/tutorial.md rename to pub/docs/dev/hoon/tutorial.md diff --git a/pub/doc/hoon/tutorial/0-nouns.md b/pub/docs/dev/hoon/tutorial/0-nouns.md similarity index 100% rename from pub/doc/hoon/tutorial/0-nouns.md rename to pub/docs/dev/hoon/tutorial/0-nouns.md diff --git a/pub/doc/hoon/tutorial/1-twigs.md b/pub/docs/dev/hoon/tutorial/1-twigs.md similarity index 100% rename from pub/doc/hoon/tutorial/1-twigs.md rename to pub/docs/dev/hoon/tutorial/1-twigs.md diff --git a/pub/doc/hoon/tutorial/2-syntax.md b/pub/docs/dev/hoon/tutorial/2-syntax.md similarity index 99% rename from pub/doc/hoon/tutorial/2-syntax.md rename to pub/docs/dev/hoon/tutorial/2-syntax.md index 34cf313e3a..3864ba48c9 100644 --- a/pub/doc/hoon/tutorial/2-syntax.md +++ b/pub/docs/dev/hoon/tutorial/2-syntax.md @@ -184,8 +184,8 @@ a digraph - a sequence of two ASCII glyphs. If you know C, you know digraphs like `->` and `?:` and are used to reading them as single characters. -In Hoon you can *say* them as words: "dasran" or "dart" for `->`, -and "wutcol" for `?:`. In a metalhead language, if we had to say +In Hoon you can *say* them as words: "dasran" and "wattis" +respectively. In a metalhead language, if we had to say "minus greater-than" and "question-colon", we'd just die. Most twig stems are made from runes, by concatenating the glyph diff --git a/pub/doc/hoon/tutorial/3-program.md b/pub/docs/dev/hoon/tutorial/3-program.md similarity index 100% rename from pub/doc/hoon/tutorial/3-program.md rename to pub/docs/dev/hoon/tutorial/3-program.md diff --git a/pub/doc/hoon/tutorial/4-functions.md b/pub/docs/dev/hoon/tutorial/4-functions.md similarity index 100% rename from pub/doc/hoon/tutorial/4-functions.md rename to pub/docs/dev/hoon/tutorial/4-functions.md diff --git a/pub/docs/dev/interpreter.mdy b/pub/docs/dev/interpreter.mdy new file mode 100644 index 0000000000..7250a3f7e0 --- /dev/null +++ b/pub/docs/dev/interpreter.mdy @@ -0,0 +1,12 @@ +--- +logo: black +title: Interpreter +sort: 5 +hide: true +--- + +# Interpreter + +The Urbit interpreter is written in C. + +Watch this space for actual documentation. \ No newline at end of file diff --git a/pub/doc/interpreter/u3.md b/pub/docs/dev/interpreter/u3.md similarity index 99% rename from pub/doc/interpreter/u3.md rename to pub/docs/dev/interpreter/u3.md index b9e687418a..f588088570 100644 --- a/pub/doc/interpreter/u3.md +++ b/pub/docs/dev/interpreter/u3.md @@ -1,4 +1,4 @@ -# u3: noun processing in C... +# u3: noun processing in C `u3` is the C library that makes Urbit work. If it wasn't called `u3`, it might be called `libnoun` - it's a library for making diff --git a/pub/docs/dev/nock.mdy b/pub/docs/dev/nock.mdy new file mode 100644 index 0000000000..a931493239 --- /dev/null +++ b/pub/docs/dev/nock.mdy @@ -0,0 +1,52 @@ +--- +logo: black +title: Nock +sort: 1 +--- + +# Nock + +Nock is a homoiconic combinator algebra, not much fancier than SKI +combinators. We'll have some tutorials soon. The spec: + +``` +A noun is an atom or a cell. +An atom is a natural number. +A cell is an ordered pair of nouns. 
+ +nock(a) *a +[a b c] [a [b c]] + +?[a b] 0 +?a 1 ++[a b] +[a b] ++a 1 + a +=[a a] 0 +=[a b] 1 +=a =a + +/[1 a] a +/[2 a b] a +/[3 a b] b +/[(a + a) b] /[2 /[a b]] +/[(a + a + 1) b] /[3 /[a b]] +/a /a + +*[a [b c] d] [*[a b c] *[a d]] + +*[a 0 b] /[b a] +*[a 1 b] b +*[a 2 b c] *[*[a b] *[a c]] +*[a 3 b] ?*[a b] +*[a 4 b] +*[a b] +*[a 5 b] =*[a b] + +*[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] +*[a 7 b c] *[a 2 b 1 c] +*[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c] +*[a 9 b c] *[a 7 c 2 [0 1] 0 b] +*[a 10 [b c] d] *[a 8 c 7 [0 3] d] +*[a 10 b c] *[a c] + +*a *a +``` diff --git a/pub/doc/nock/reference.md b/pub/docs/dev/nock/reference.md similarity index 100% rename from pub/doc/nock/reference.md rename to pub/docs/dev/nock/reference.md diff --git a/pub/docs/dev/nock/tutorial.md b/pub/docs/dev/nock/tutorial.md new file mode 100755 index 0000000000..c3a4c4ce59 --- /dev/null +++ b/pub/docs/dev/nock/tutorial.md @@ -0,0 +1,1044 @@ +--- +layout: post +category: doc +title: Chapter 2 : Crash course in Nock +--- + +So let's learn Nock! But wait - why learn Nock? After all, +we're going to be programming in Hoon, not Nock. + +Like JVM bytecode, Nock is as inscrutable as assembly language. +In fact, you can think of it as a sort of "functional assembly +language." There are sometimes reasons to program in real +assembly language. There is never a reason to program in Nock. +Except to learn Nock. + +Indeed, it is not necessary for the Hoon programmer to learn +Nock. We recommend it strongly, however, because Hoon has a very +special relationship with Nock - not unlike the relationship +between C and assembly language. + +Just as C is a very shallow layer over the raw CPU, Hoon is a +very shallow layer over raw Nock - often little more than a +macro. If you try to learn C without understanding the CPU under +it, you will be forever bemused by why it works the way it does. + +So let's learn Nock! But wait - which Nock? Nock, though more +frozen than Walt Disney, does have versions. Nock versions are +measured by integer degrees Kelvin, newer being colder. The +newest, Nock 5K - roughly the temperature of Neptune. No change +is anticipated between 5K and absolute zero, though you never +know. Any such change would certainly be quite painful. + +#1.1 Definition# + +The best way to learn Nock is to read the spec and write your own +naive interpreter. Here is Nock 5K: + +**1. Structures** + + A noun is an atom or a cell. An atom is a natural number. + A cell is an ordered pair of nouns. + +**2. Pseudocode** + + 1 :: nock(a) *a + 2 :: [a b c] [a [b c]] + 3 :: + 4 :: ?[a b] 0 + 5 :: ?a 1 + 6 :: +[a b] +[a b] + 7 :: +a 1 + a + 8 :: =[a a] 0 + 9 :: =[a b] 1 + 10 :: =a =a + 11 :: + 12 :: /[1 a] a + 13 :: /[2 [a b]] a + 14 :: /[3 [a b]] b + 15 :: /[(a + a) b] /[2 /[a b]] + 16 :: /[(a + a + 1) b] /[3 /[a b]] + 17 :: /a /a + 18 :: + 19 :: *[a [[b c] d]] [*[a [b c]] *[a d]] + 20 :: + 21 :: *[a [0 b]] /[b a] + 22 :: *[a [1 b]] b + 23 :: *[a [2 [b c]]] *[*[a b] *[a c]] + 24 :: *[a [3 b]] ?*[a b] + 25 :: *[a [4 b]] +*[a b] + 26 :: *[a [5 b]] =*[a b] + 27 :: + 28 :: *[a [6 [b [c d]]]] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] + 29 :: *[a [7 [b c]]] *[a 2 b 1 c] + 30 :: *[a [8 [b c]]] *[a 7 [[7 [0 1] b] 0 1] c] + 31 :: *[a [9 [b c]]] *[a 7 c 2 [0 1] 0 b] + 32 :: *[a [10 [[b c] d]]] *[a 8 c 7 [0 3] d] + 33 :: *[a [10 [b c]]] *[a c] + 34 :: + 35 :: *a *a + +Your interpreter should be no more than a page of code in +any language. For extra credit, `6`-`10` are macros; implement +them directly. 
For extra extra credit, optimize tail calls. +To test your code, write a decrement formula b such that +`*[a b]` is `a - 1` for any atomic nonzero `a`. + +#1.2 Installation# + +The second best way to learn Nock is to boot up your own Arvo +virtual computer. See the Arvo tutorial for instructions. + +#1.3 Nock# + +To make Nock make sense, let's work through Nock 5K line by line. +First the data model: + +##1. Structures## + + A noun is an atom or a cell. An atom is any natural number. + A cell is any ordered pair of nouns. + +Nouns are the dumbest data model ever. Nouns make JSON look like +XML and XML look like ASN.1. It may also remind you of Lisp's +S-expressions - you can think of nouns as "S-expressions without +the S." + +To be exact, a noun _is_ an S-expression, except that classic +S-expressions have multiple atom types ("S" is for "symbol"). +Since Nock is designed to be used with a higher-level type system +(such as Hoon's), it does not need low-level types. An atom is +just an unsigned integer of any size. + +For instance, it's common to represent strings (or even whole +text files) as atoms, arranging them LSB first - so "foo" becomes +`0x6f6f66`. How do we know to print this as "foo", not `0x6f6f66`? +We need external information - such as a Hoon type. Similarly, +other common atomic types - signed integers, floating point, etc +- are all straightforward to map into atoms. + +It's also important to note that, unlike Lisp, Nock cannot create +cyclical data structures. It is normal and common for nouns in a +Nock runtime system to have acyclic structure - shared subtrees. +But there is no Nock computation that can make a child point to +its parent. One consequence: Nock has no garbage collector. +(Nor can dag structure be detected, as with Lisp `eq`.) + +There is also no single syntax for nouns. If you have nouns you +have Nock; if you have Nock you have Hoon; if you have Hoon, you +can write whatever parser you like. + +Let's continue: + +##2. Pseudocode## + +It's important to recognize that the pseudocode of the Nock spec +is just that: pseudocode. It looks a little like Hoon. It isn't +Hoon - it's just pseudocode. Or in other words, just English. +At the bottom of every formal system is a system of axioms, which +can only be written in English. (Why pseudocode, not Hoon? Since +Hoon is defined in Nock, this would only give a false impression +of nonexistent precision.) + +The logic of this pseudocode is a pattern-matching reduction, +matching from the top down. To compute Nock, repeatedly reduce +with the first line that matches. Let's jump right in! + +##Line 1:## + + 1 :: nock(a) *a + +Nock is a pure (stateless) function from noun to noun. In our +pseudocode (and only in our pseudocode) we express this with the +prefix operator `*`. + +This function is defined for every noun, but on many nouns it +does nothing useful. For instance, if `a` is an atom, `*a` +reduces to... `*a`. In theory, this means that Nock spins +forever in an infinite loop. In other words, Nock produces no +result - and in practice, your interpreter will stop. + +(Another way to see this is that Nock has "crash-only" semantics. +There is no exception mechanism. The only way to catch Nock +errors is to simulate Nock in a higher-level virtual Nock - +which, in fact, we do all the time. A simulator (or a practical +low-level interpreter) can report, out of band, that Nock would +not terminate. It cannot recognize all infinite loops, of +course, but it can catch the obvious ones - like `*42`.) 
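(As a concrete aside on the structures above: the noun conventions are easy to pin down in a few lines of Python. The representation below - atoms as ints, cells as 2-tuples - is an assumption for illustration only, not part of the spec, but it reproduces the LSB-first `0x6f6f66` packing of "foo" mentioned earlier.)

    # Illustrative noun representation: an atom is a non-negative int,
    # a cell is a 2-tuple of nouns.  Not part of the spec.
    def atom_from_text(s):
        # pack the bytes least-significant byte first
        return int.from_bytes(s.encode('ascii'), 'little')

    assert atom_from_text('foo') == 0x6f6f66

    noun = (1, (2, 3))    # the cell [1 2 3], brackets associating right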
+ +Normally `a` in `nock(a)` is a cell `[s f]`, or as we say + + [subject formula] + +Intuitively, the formula is your function and the subject is +its argument. We call them something different because Hoon, +or any other high-level language built on Nock, will build its +own function calling convention which *does not* map directly +to `*[subject formula]`. + +##Line 2:## + + 2 :: [a b c] [a [b c]] + +Ie, brackets (in our pseudocode, as in Hoon) associate to the +right. For those with Lisp experience, it's important to note +that Nock and Hoon use tuples or "improper lists" much more +heavily than Lisp. The list terminator, normally 0, is never +automatic. So the Lisp list + + (a b c) + +becomes the Nock noun + + [a b c 0] + +which is equivalent to + + [a [b [c 0]]] + +Note that we can and do use unnecessary brackets anyway, for +emphasis. + +Let's move on to the axiomatic functions. + +##Lines 4-10:## + + 4 :: ?[a b] 0 + 5 :: ?a 1 + 6 :: +[a b] +[a b] + 7 :: +a 1 + a + 8 :: =[a a] 0 + 9 :: =[a b] 1 + +Here we define more pseudocode operators, which we'll use in +reductions further down. So far we have four built-in functions: +`*` meaning Nock itself, `?` testing whether a noun is a cell or +an atom, `+` incrementing an atom, and `=` testing for equality. +Again, no rocket science here. + +We should note that in Nock and Hoon, `0` (pronounced "yes") is +true, and `1` ("no") is false. Why? It's fresh, it's different, +it's new. And it's annoying. And it keeps you on your toes. +And it's also just intuitively right. + + +##Lines 12-16:## + + 12 :: /[1 a] a + 13 :: /[2 a b] a + 14 :: /[3 a b] b + 15 :: /[(a + a) b] /[2 /[a b]] + 16 :: /[(a + a + 1) b] /[3 /[a b]] + +Slightly more interesting is our tree numbering. Every noun is of course a tree. The `/` operator - pronounced +"slot" - imposes an address space on that tree, mapping every +nonzero atom to a tree position. + +1 is the root. The head of every node `n` is `2n`; the tail is +`2n+1`. Thus a simple tree: + + 1 + 2 3 + 4 5 6 7 + 14 15 + +If the value of every leaf is its tree address, this tree is + + [[4 5] [6 14 15]] + +and, for some examples of addressing: + + /[1 [[4 5] [6 14 15]]] + +is `[[4 5] [6 14 15]]` + + /[2 [[4 5] [6 14 15]]] + +is `[4 5]` + + /[3 [[4 5] [6 14 15]]] + +is `[6 14 15]`, and + + /[7 [[4 5] [6 14 15]]] + +is `[14 15]` + +I do hope this isn't so terribly hard to follow. + +##Line 21:## + +Now we enter the definition of Nock itself - ie, the `*` +operator. + + 21 :: *[a 0 b] /[b a] + +`0` is simply Nock's tree-addressing operator. Let's try it out +from the Arvo command line. + +Note that we're using Hoon syntax here. Since we do not use Nock +from Hoon all that often (it's sort of like embedding assembly in +C), we've left it a little cumbersome. In Hoon, instead of +writing `*[a 0 b]`, we write + + .*(a [0 b]) + +So, to reuse our slot example, let's try the interpreter: + + ~tasfyn-partyv> .*([[4 5] [6 14 15]] [0 7]) + +gives, while the sky remains blue and the sun rises in the east: + + [14 15] + +Even stupider is line 22: + +##Line 22:## + + 22 :: *[a 1 b] b + +`1` is the constant operator. It produces its argument without +reference to the subject. So + + ~tasfyn-partyv> .*(42 [1 153 218]) + +yields + + [153 218] + + +##Line 23:## + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + +Line 23 brings us the essential magic of recursion. +`2` is the Nock operator. If you can compute a subject and a +formula, you can evaluate them in the interpreter. In most +fundamental languages, like Lisp, `eval` is a curiosity.
But +Nock has no `apply` - so all our work gets done with `2`. + +Let's convert the previous example into a stupid use of `2`: + + ~tasfyn-partyv> .*(77 [2 [1 42] [1 1 153 218]]) + +with a constant subject and a constant formula, gives the same + + [153 218] + +Like so: + + *[77 [2 [1 42] [1 1 153 218]]] + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + + *[*[77 [1 42]] *[77 [1 1 153 218]]] + + 22 :: *[a 1 b] b + + *[42 *[77 [1 1 153 218]]] + + *[42 1 153 218] + + [153 218] + +##Lines 24-26:## + + 24 :: *[a 3 b] ?*[a b] + 25 :: *[a 4 b] +*[a b] + 26 :: *[a 5 b] =*[a b] + +In lines 24-26, we meet our axiomatic functions again: + +For instance, if `x` is a formula that calculates some product, +`[4 x]` calculates that product plus one. Hence: + + ~tasfyn-partyv> .*(57 [0 1]) + 57 + +and + + ~tasfyn-partyv> .*([132 19] [0 3]) + 19 + +and + + ~tasfyn-partyv> .*(57 [4 0 1]) + 58 + +and + + ~tasfyn-partyv> .*([132 19] [4 0 3]) + 20 + +If this seems obvious, you're doin' good. Finally, we jump back up +to line 19, the trickiest in the spec: + +##Line 19:## + + 19 :: *[a [b c] d] [*[a b c] *[a d]] + +Um, what? + +Since Nock of an atom just crashes, the practical domain of the +Nock function is always a cell. Conventionally, the head of this +cell is the "subject," the tail is the "formula," and the result +of Nocking it is the "product." Basically, the subject is your +data and the formula is your code. + +We could write line 19 less formally: + + *[subject [formula-x formula-y]] + => [*[subject formula-x] *[subject formula-y]] + +In other words, if you have two Nock formulas `x` and `y`, a +formula that computes the pair of them is just `[x y]`. We can +recognize this because no atom is a valid formula, and +every formula that _does not_ use line 19 has an atomic head. + +If you know Lisp, you can think of this feature as a sort of +"implicit cons." Where in Lisp you would write `(cons x y)`, +in Nock you write `[x y]`. + +For example, + + ~tasfyn-partyv> .*(42 [4 0 1]) + +where `42` is the subject (data) and `[4 0 1]` is the formula +(code), happens to evaluate to `43`. Whereas + + ~tasfyn-partyv> .*(42 [3 0 1]) + +is `1`. So if we evaluate + + ~tasfyn-partyv> .*(42 [[4 0 1] [3 0 1]]) + +we get + + [43 1] + +Except for the crash defaults (lines 6, 10, 17, and 35), we've actually +completed all the _essential_ aspects of Nock. The operators up +through 5 provide all necessary computational functionality. +Nock, though very simple, is actually much more complex than it +formally needs to be. + +Operators 6 through 10 are macros. They exist because Nock is +not a toy, but a practical interpreter. Let's see them all +together: + +##Lines 28-33:## + + 28 :: *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] + 29 :: *[a 7 b c] *[a 2 b 1 c] + 30 :: *[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c] + 31 :: *[a 9 b c] *[a 7 c 2 [0 1] 0 b] + 32 :: *[a 10 [b c] d] *[a 8 c 7 [0 3] d] + 33 :: *[a 10 b c] *[a c] + +Whoa! Have we entered rocket-science territory? Let's try to +figure out what these strange formulas do - simplest first. +The simplest is clearly line 33: + + 33 :: *[a 10 b c] *[a c] + +If `x` is an atom and `y` is a formula, the formula `[10 x y]` +appears to be equivalent to... `y`. For instance: + + ~tasfyn-partyv> .*([132 19] [10 37 [4 0 3]]) + 20 + +Why would we want to do this? `10` is actually a hint operator. +The `37` in this example is discarded information - it is not +used, formally, in the computation. It may help the interpreter +compute the expression more efficiently, however.
+ +Every Nock computes the same result - but not all at the same +speed. What hints are supported? What do they do? Hints are a +higher-level convention which do not, and should not, appear in +the Nock spec. Some are defined in Hoon. Indeed, a naive Nock +interpreter not optimized for Hoon will run Hoon quite poorly. +When it gets the product, however, the product will be right. + +There is another reduction for hints - line 32: + + 32 :: *[a 10 [b c] d] *[a 8 c 7 [0 3] d] + +Once we see what `7` and `8` do, we'll see that this complex hint +throws away an arbitrary `b`, but computes the formula `c` +against the subject and... throws away the product. This formula +is simply equivalent to `d`. Of course, in practice the product +of `c` will be put to some sordid and useful use. It could even +wind up as a side effect, though we try not to get _that_ sordid. + +(Why do we even care that `c` is computed? Because `c` could +crash. A correct Nock cannot simply ignore it, and treat both +variants of `10` as equivalent.) + +We move on to the next simplest operator, `7`. Line 29: + + 29 :: *[a 7 b c] *[a 2 b 1 c] + +Suppose we have two formulas, `b` and `c`. What is the formula +`[7 b c]`? This example will show you: + + ~tasfyn-partyv> .*(42 [7 [4 0 1] [4 0 1]]) + 44 + +`7` is an old mathematical friend, function composition. It's +easy to see how this is built out of `2`. The data to evaluate +is simply `b`, and the formula is `c` quoted. + +Line 30 looks very similar: + + 30 :: *[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c] + +Indeed, `8` is `7`, except that the subject for `c` is not simply +the product of `b`, but the ordered pair of the product of `b` +and the original subject. Hence: + + ~tasfyn-partyv> .*(42 [8 [4 0 1] [0 1]]) + [43 42] + +and + + ~tasfyn-partyv> .*(42 [8 [4 0 1] [4 0 3]]) + 43 + +Why would we want to do this? Imagine a higher-level language +in which the programmer declares a variable. This language is +likely to generate an `8`, because the variable is computed +against the present subject, and used in a calculation which +depends both on the original subject and the new variable. + +For extra credit, explain why we can't just define + + *[a 8 b c] *[a 7 [b 0 1] c] + +Another simple macro is line 31: + + 31 :: *[a 9 b c] *[a 7 c 2 [0 1] 0 b] + +`9` is a calling convention. With `c`, we produce a noun which +contains both code and data - a _core_. We use this core as the +subject, and apply the formula within it at slot `b`. + +And finally, we come to the piece de resistance - line 28: + + 28 :: *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] + +Great giblets! WTF is this doing? It seems we've finally +arrived at some real rocket science. + +Actually, `6` is a primitive known to every programmer - good old +"if." If `b` evaluates to `0`, we produce `c`; if `b` evaluates +to `1`, we produce `d`; otherwise, we crash. + +For instance: + + ~tasfyn-partyv> .*(42 [6 [1 0] [4 0 1] [1 233]]) + 43 + +and + + ~tasfyn-partyv> .*(42 [6 [1 1] [4 0 1] [1 233]]) + 233 + +In real life, of course, the Nock implementor knows that `6` is +"if" and implements it as such. There is no practical sense in +reducing through this macro, or any of the others. We could have +defined "if" as a built-in function, like increment - except that +we can write "if" as a macro. If a funky macro. + +It's a good exercise, however, to peek inside the funk. 
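Before we do, it helps to have something to check reductions against. Below is the naive interpreter suggested as an exercise in section 1.1, sketched in Python. It assumes atoms are ints and cells are 2-tuples, and makes no attempt at efficiency or tail calls; it is an illustration, not part of the original text.

    # A naive Nock 5K evaluator (sketch): atoms are ints, cells are tuples.
    def cell(n):
        return isinstance(n, tuple)

    def slot(b, a):                       # /[b a], the tree-address operator
        if b == 1: return a
        if b == 2: return a[0]
        if b == 3: return a[1]
        inner = slot(b >> 1, a)           # /[2n b] and /[2n+1 b]
        return inner[0] if b % 2 == 0 else inner[1]

    def nock(a, f):                       # *[a f]
        if cell(f[0]):                    # line 19: implicit cons
            return (nock(a, f[0]), nock(a, f[1]))
        op, arg = f
        if op == 0: return slot(arg, a)
        if op == 1: return arg
        if op == 2: return nock(nock(a, arg[0]), nock(a, arg[1]))
        if op == 3: return 0 if cell(nock(a, arg)) else 1
        if op == 4: return nock(a, arg) + 1
        if op == 5:
            p = nock(a, arg)
            return 0 if p[0] == p[1] else 1
        if op == 6:                       # "if", crashing on a non-boolean test
            t = nock(a, arg[0])
            if t == 0: return nock(a, arg[1][0])
            if t == 1: return nock(a, arg[1][1])
            raise Exception('crash')
        if op == 7: return nock(nock(a, arg[0]), arg[1])       # composition
        if op == 8: return nock((nock(a, arg[0]), a), arg[1])  # "variable"
        if op == 9:                       # pull an arm out of a core
            core = nock(a, arg[1])
            return nock(core, slot(arg[0], core))
        if op == 10:                      # hint: evaluate a [b c] clue, discard it
            if cell(arg[0]): nock(a, arg[0][1])
            return nock(a, arg[1])
        raise Exception('crash')

With it, `nock(42, (4, (0, 1)))` gives `43` and `nock((132, 19), (10, (37, (4, (0, 3)))))` gives `20`, matching the examples above.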
+ +We can actually simplify the semantics of `6`, at the expense of +breaking the system a little, by creating a macro that works as +"if" only if `b` is a proper boolean and produces `0` or `1`. +Perhaps we have a higher-level type system which checks this. + +This simpler "if" would be: + + *[a 6 b c d] *[a [2 [0 1] [2 [1 c d] [[1 0] [4 4 b]]]]] + +Or without so many unnecessary brackets: + + *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] [4 4 b]] + +How does this work? We've replaced `[6 b c d]` with the formula +`[2 [0 1] [2 [1 c d] [[1 0] [4 4 b]]]]`. We see two uses of `2`, +our evaluation operator - an outer and an inner. + +Call the inner one `i`. So we have `[2 [0 1] i]`. Which means +that, to calculate our product, we use `[0 1]` - that is, the +original subject - as the subject; and the product of `i` as +the formula. + +Okay, cool. So `i` is `[2 [1 c d] [[1 0] [4 4 b]]]`. We compute +Nock with subject `[1 c d]`, formula `[[1 0] [4 4 b]]`. + +Obviously, `[1 c d]` produces just `[c d]` - that is, the ordered +pair of the "then" and "else" formulas. `[[1 0] [4 4 b]]` is a +line 19 cell - its head is `[1 0]`, producing just `0`, its tail +`[4 4 b]`, producing... what? Well, if `[4 b]` is `b` plus `1`, +`[4 4 b]` is `b` plus `2`. + +We're assuming that `b` produces either `0` or `1`. So `[4 4 b]` +yields either `2` or `3`. `[[1 0] [4 4 b]]` is either `[0 2]` or +`[0 3]`. Applied to the subject `[c d]`, this gives us either +`c` or `d` - the product of our inner evaluation `i`. This is +applied to the original subject, and the result is "if." + +But we need the full power of the funk, because if `b` produces, +say, `7`, all kinds of weirdness will result. We'd really like +`6` to just crash if the test product is not a boolean. How can +we accomplish this? This is an excellent way to prove to +yourself that you understand Nock: figure out what the real `6` +does. Or you could just agree that `6` is "if," and move on. + +(It's worth noting that in practical, compiler-generated Nock, we +never do anything as funky as these `6` macro internals. There's +no reason we couldn't build formulas at runtime, but we have no +reason to and we don't - except when actually metaprogramming. +As in most languages, normally code is code and data is data.) + +#1.4 Decrement in Nock# + +A good practice exercise for Nock is a decrement formula. Ie, a +formula `f` which implements the partial function that produces +`(s - 1)` if `s` is a nonzero atom, and otherwise does not +terminate. + +The normal Hoon programmer has written one Nock formula: this +one. Since decrement uses all the Nock techniques the Hoon +compiler uses, the exercise is a good foundation. After you +write decrement (or just follow this example), you'll never need +to deal with Nock again. + +As we know, the equivalent formula for increment is + + [4 0 1] + +Thus: + + ~>tasfyn-partyv .*(42 [4 0 1]) + 43 + +Of course, increment is built into Nock. So, ha, that's easy. + +How do we decrement? A good way to start is to gaze fondly on +how we'd do it if we actually had a real language, ie, Hoon. +Here is a minimal decrement in Hoon: + + => a=. :: line 1 + =+ b=0 :: line 2 + |- :: line 3 + ?: =(a +(b)) :: line 4 + b :: line 5 + $(b +(b)) :: line 6 + +Or for fun, on one line: + + =>(a=. =+(b=0 |-(?:(=(a +(b)) b $(b +(b)))))) + +Does Hoon actually work? + + ~tasfyn-partyv> =>(42 =>(a=. =+(b=0 |-(?:(=(a +(b)) b $(b +(b))))))) + 41 + +Let's translate this into English. How do we decrement the +subject? First (line 1), we rename the subject `a`. 
Second +(line 2), we add a variable, `b`, an atom with value `0`. +Third (line 3), we loop. Fourth, we test if `a` equals `b` plus +1 (line 4), produce `b` if it does (line 5), repeat the loop with +`b` set to `b` plus 1 (line 6) if it doesn't. Obviously, while +the syntax is unusual, the algorithm is anything but deep. We +are calculating `b` minus one by counting up from `0`. + +(Obviously, this is an O(n) algorithm. Is there a better way? +There is not. Do we actually do this in practice? Yes and no.) + +Unfortunately we are missing a third of our Rosetta stone. We +have decrement in Hoon and we have it in English. How do we +express this in Nock? What will the Hoon compiler generate from +the code above? Let's work through it line by line. + +Nock has no types, variable names, etc. So line 1 is a no-op. + +How do we add a variable (line 2)? We compute a new subject, +which is a cell of the present subject and the variable. With +this new subject, we execute another formula. + +Since `0` is a constant, a formula that produces it is + + [1 0] + +To combine `0` with the subject, we compute + + [[1 0] [0 1]] + +which, if our subject is 42, gives us + + [0 42] + +which we can use as the subject for an inner formula, `g`. +Composing our new variable with `g`, we have `f` as + + [2 [[1 0] [0 1]] [1 g]] + +which seems a little funky for something so simple. But we +can simplify it with the composition macro, `7`: + + [7 [[1 0] [0 1]] g] + +and still further with the augmentation macro, `8`: + + [8 [1 0] g] + +If you refer back to the Nock definition, you'll see that all +these formulas are semantically equivalent. + +Let's continue with our decrement. So what's `g`? We seem to +loop. Does Nock have a loop operator? It most certainly does +not. So what do we do? + +We build a noun called a _core_ - a construct which is behind any +kind of interesting control flow in Hoon. Of course, the Nock +programmer is not constrained to use the same techniques as the +Hoon compiler, but it is probably a good idea. + +In Hoon, all the flow structures from your old life as an Earth +programmer become cores. Functions and/or closures are cores, +objects are cores modules are cores, even loops are cores. + +The core is just a cell whose tail is data (possibly containing +other cores) and whose head is code (containing one or more +formulas). The tail is the _payload_ and the head is the +_battery_. Hence your core is + + [bat pay] + +To activate a core, pick a formula out of the battery, and use +the entire core (_not_ just the payload) as the subject. + +(A core formula is called an _arm_. An arm is almost like an +object-oriented method, but not quite - a method would be an arm +that produces a function on an argument. The arm is just a +function of the core, ie, a computed attribute.) + +Of course, because we feed it the entire core, our arm can +invoke itself (or any other formula in the battery). Hence, it +can loop. And this is what a loop is - the simplest of cores. + +We need to do two things with this core: create it, and activate +it. To be precise, we need two formulas: a formula which +produces the core, and one which activates its subject. We can +compose these functions with the handy `7` operator: + + [8 [1 0] [7 p a]] + +`p` produces our core, `a` activates it. Let's take these in +reverse order. How do we activate a core? + +Since we have only one formula, it's the battery itself. +Thus we want to execute Nock with the whole core (already the +subject, and the entire battery (slot `2`). 
Hence, `a` is + + [2 [0 1] [0 2]] + +We could also use the handy `9` macro - which almost seems +designed for firing arms on cores: + + [9 2 [0 1]] + +Which leaves us seeking + + [8 [1 0] [7 p [9 2 0 1]]] + +And all we have to do is build the core, `p`. How do we build a +core? We add code to the subject, just as we added a variable +above. The initial value of our counter was a constant, `0`. +The initial (and permanent) value of our battery is a constant, +the loop formula `l`. So `p` is + + [8 [1 l] [0 1]] + +Which would leave us seeking + + [8 [1 0] [7 [8 [1 l] [0 1]] [9 2 0 1]]] + +except that we have duplicated the `8` pattern again, since we +know + + [7 [8 [1 l] [0 1]] [9 2 0 1]] + +is equivalent to + + [8 [1 l] [9 2 0 1]] + +so the full value of `f` is + + [8 [1 0] [8 [1 l] [9 2 0 1]]] + +Thus our only formula to compose is the loop body, `l`. +Its subject is the loop core: + + [bat pay] + +where `bat` is just the loop formula, and `pay` is the pair `[a +b]`, `a` being the input subject, and `b` the counter. Thus we +could also write this subject as + + [l b a] + +and we see readily that `a` is at slot `7`, `b` `6`, `l` `2`. +With this subject, we need to express the Hoon loop body + + ?: =(a +(b)) :: line 4 + b :: line 5 + $(b +(b)) :: line 6 + +This is obviously an if statement, and it calls for `6`. Ie: + + [6 t y n] + +Giving our decrement program as: + + [8 [1 0] [8 [1 6 t y n] [9 2 0 1]]] + +For `t`, how do we compute a flag that is yes (`0`) if `a` equals +`b` plus one? Equals, we recall, is `5`. So `t` can only be + + [5 [0 7] [4 0 6]] + +If so, our product `y` is just the counter `b`: + + [0 6] + +And if not? We have to re-execute the loop with the counter +incremented. If we were executing it with the same counter, +obviously an infinite loop, we could use the same core: + + [9 2 0 1] + +But instead we need to construct a new core with the counter +incremented: + + [l +(b) a] + +ie, + + [[0 2] [4 0 6] [0 7]] + +and `n` is: + + [9 2 [[0 2] [4 0 6] [0 7]]] + +Hence our complete decrement. Let's reformat vertically so we +can actually read it: + + [8 + [1 0] + [ 8 + [ 1 + [ 6 + t + y + n + ] + ] + [9 2 0 1] + ] + ] + +which becomes + + [8 + [1 0] + [ 8 + [ 1 + [ 6 + [5 [0 7] [4 0 6]] + [0 6] + [9 2 [[0 2] [4 0 6] [0 7]]] + ] + ] + [9 2 0 1] + ] + ] + +or, on one line without superfluous brackets: + + [8 [1 0] 8 [1 6 [5 [0 7] 4 0 6] [0 6] 9 2 [0 2] [4 0 6] 0 7] 9 2 0 1] + +which works for the important special case, 42: + + ~tasfyn-partyv> .*(42 [8 [1 0] 8 [1 6 [5 [0 7] 4 0 6] [0 6] 9 2 [0 2] [4 0 6] 0 7] 9 2 0 1]) + 41 + +If you understood this, you understand Nock. At least in principle! + +If you want to play around more with Nock, the command line will +start getting unwieldy. Fortunately, the standard install +contains the above Nock decrement packaged as an Arvo app, which +you can edit and change if you'd like to get ambitious. Just run + + ~tasfyn-partyv> :toy/ndec 19 + 18 + +The file driving this is + + hub/$seat/toy/app/ndec.holw + +Edit this file, ignoring everything above the Nock formula, and +hit return in the console to see it update: + + ~tasfyn-partyv> + : ~tasfyn-partyv/toy/app/ndec/holw/ + +If decrement seems fun - why not write add? I wrote a Nock adder +a long, long time ago. But I've forgotten where I put it. There +is absolutely no use in this exercise, except to prove to +yourself that you've mastered Nock. 
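+
+(If you'd rather have a machine check these reductions than grind
+them out by hand, here is a toy Nock evaluator in Python - a
+sketch, emphatically not the interpreter urbit uses, and it
+implements the simple "if" rather than the crash-proof `6`.
+Cells are Python tuples, atoms are Python ints.)
+
+    # toy nock evaluator - just enough of operators 0-9 to run the
+    # decrement formula above; not urbit's interpreter.
+    def slot(n, noun):                        # /[n a]: tree addressing
+        if n == 1: return noun
+        if n == 2: return noun[0]
+        if n == 3: return noun[1]
+        return slot(2 + (n & 1), slot(n >> 1, noun))
+
+    def nock(subj, form):                     # *[subject formula]
+        if isinstance(form[0], tuple):        # distribution: *[a [b c] d]
+            return (nock(subj, form[0]), nock(subj, form[1]))
+        op, arg = form
+        if op == 0: return slot(arg, subj)                    # axis
+        if op == 1: return arg                                # constant
+        if op == 2: return nock(nock(subj, arg[0]), nock(subj, arg[1]))
+        if op == 3: return 0 if isinstance(nock(subj, arg), tuple) else 1
+        if op == 4: return nock(subj, arg) + 1                # increment
+        if op == 5:                                           # equality
+            p = nock(subj, arg)
+            return 0 if p[0] == p[1] else 1
+        if op == 6:                                           # simple "if"
+            return nock(subj, arg[1][0] if nock(subj, arg[0]) == 0 else arg[1][1])
+        if op == 7: return nock(nock(subj, arg[0]), arg[1])   # compose
+        if op == 8: return nock((nock(subj, arg[0]), subj), arg[1])  # push
+        if op == 9:                                           # fire arm of core
+            core = nock(subj, arg[1])
+            return nock(core, slot(arg[0], core))
+        raise ValueError("not a formula")
+
+    # the decrement formula, written out as a binary tree
+    t = (5, ((0, 7), (4, (0, 6))))                  # =(a +(b))
+    y = (0, 6)                                      # yes: produce b
+    n = (9, (2, ((0, 2), ((4, (0, 6)), (0, 7)))))   # no: loop with +(b)
+    loop = (6, (t, (y, n)))
+    dec = (8, ((1, 0), (8, ((1, loop), (9, (2, (0, 1)))))))
+
+    print(nock(42, dec))                            # 41
+
+(Swap your own formula in for `dec` and you have a scratchpad for
+the add exercise.)
+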
+ +#Appendix A: Operator Reductions# + +##`6` Reduction:## + + 28 :: *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] + + *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + + *[*[a 0 1] *[a 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b]] + + 21 :: *[a 0 b] /[b a] + + *[a *[a 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b]] + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + + *[a *[*[a [1 c d]] *[a [1 0] 2 [1 2 3] [1 0] 4 4 b]]] + + 22 :: *[a 1 b] b + + 19 :: *[a [b c] d] [*[a b c] *[a d]] + + *[a *[[c d] [*[a 1 0] *[a 2 [1 2 3] [1 0] 4 4 b]]]] + + 22 :: *[a 1 b] b + + *[a *[[c d] [0 *[a 2 [1 2 3] [1 0] 4 4 b]]]] + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + + *[a *[[c d] [0 *[*[a [1 2 3]] *[a [1 0] 4 4 b]]]]] + + 22 :: *[a 1 b] b + + *[a *[[c d] [0 *[[2 3] *[a [1 0] 4 4 b]]]]] + + 19 :: *[a [b c] d] [*[a b c] *[a d]] + + *[a *[[c d] [0 *[[2 3] [*[a [1 0]] *[a 4 4 b]]]]]] + + 22 :: *[a 1 b] b + + *[a *[[c d] [0 *[[2 3] [0 *[a 4 4 b]]]]]] + + 25 :: *[a 4 b] +*[a b] + + *[a *[[c d] [0 *[[2 3] [0 ++*[a b]]]]]] + +**`6` Reduced:** + + 6r :: *[a 6 b c d] *[a *[[c d] [0 *[[2 3] [0 ++*[a b]]]]]] + +##`7` Reduction:## + + 29 :: *[a 7 b c] *[a 2 b 1 c] + + *[a 2 b 1 c] + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + + *[*[a b] *[a 1 c]] + + 22: *[a 1 b] b + + *[*[a b] c] + +**`7` Reduced:** + + 7r :: *[a 7 b c] *[*[a b] c] + +##`8` Reduction:## + + 30 :: *[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c] + + *[a 7 [[7 [0 1] b] 0 1] c] + + 7r :: *[a 7 b c] *[*[a b] c] + + *[*[a [7 [0 1] b] 0 1]] c] + + 19 :: *[a [b c] d] [*[a b c] *[a d]] + + *[[*[a [7 [0 1] b]] *[a 0 1]] c] + + 21 :: *[a 0 b] /[b a] + + *[[*[a [7 [0 1] b]] /[1 a]] c] + + 12 :: /[1 a] a + + *[[*[a [7 [0 1] b]] a] c] + + 7r :: *[a 7 b c] *[*[a b] c] + + *[[*[*[a 0 1]] b] a] c] + +**`8` Reduced:** + + 8r :: *[a 8 b c] *[[*[a b] a] c] + + +##`9` Reduction:## + + 31 :: *[a 9 b c] *[a 7 c [2 [0 1] [0 b]]] + + *[a 7 c [2 [0 1] [0 b]]] + + 7r :: *[a 7 b c] *[*[a b] c] + + *[*[a c] [2 [0 1] [0 b]]] + + 23 :: *[a 2 b c] *[*[a b] *[a c]] + + *[*[*[a c] [0 1]] *[*[a c] [0 b]]] + + 21 :: *[a 0 b] /[b a] + +**`9` Reduced:** + + 9r :: *[a 9 b c] *[*[a c] *[*[a c] 0 b]] + + +##`10` Reduction:## + + *[a 10 [b c] d] *[a 8 c 7 [0 3] d] + + 8r :: *[a 8 b c] [[*[a b] a] c] + + *[[*[a c] a] 7 [0 2] d] + + 7r :: *[a 7 b c] *[*[a b] c] + + *[*[[*[a c] a] 0 3] d] + + 21 :: *[a 0 b] /[b a] + +**`10` reduced:** + + 10r :: *[a 10 [b c] d] *[a d] + diff --git a/pub/docs/theory.mdy b/pub/docs/theory.mdy new file mode 100644 index 0000000000..e36d623875 --- /dev/null +++ b/pub/docs/theory.mdy @@ -0,0 +1,26 @@ +--- +title: Theory +sort: 3 +logo: black +--- + +
+ +# Theory + +The only single document with a general overview of Urbit is our +ginormous [whitepaper](theory/whitepaper). The whitepaper has been +described as "sort of like a PhD thesis, but maybe more like +three PhD theses jammed into a single 40-page document." Like +Everest, it's there. + +Some critics experience doubt that Urbit is actually a real +thing. This is forgivable. Sometimes, visual evidence will +correct these disturbed minds. See our 2015 demo +[(part 1)](theory/part-i), [(part 2)](theory/part-ii), or our original +[2013 demo](theory/old). But if the aliens could fake the moon +landings, they could certainly fake Urbit. + + + +
diff --git a/pub/docs/theory/network-goals.mdy b/pub/docs/theory/network-goals.mdy new file mode 100644 index 0000000000..8b4304ddff --- /dev/null +++ b/pub/docs/theory/network-goals.mdy @@ -0,0 +1,565 @@ +--- +title: Network Goals, Part I +sort: 4 +hide: true +--- + +# Constitution of a digital republic: part 1, goals + +Some of us remember when the Internet was a social network. +Today, the Internet is a modem. + +It's a wonderful modem. It connects you to all kinds of great +online services. Some of which are social "networks," but only +networks in the MBA sense. Really they're social *servers*: +giant virtual mainframes running one hardcoded program. 1976 +called -- it wants its acoustic coupler back. + +So, you prefer 1996. So, you wish you had your decentralized +Internet back. So, you don't seem alone in this. So, we know +one thing: wishing hasn't made it happen. + +## John Perry Barlow + +It's interesting to go back and read John Perry Barlow's 1996 +manifesto, the [Cyberspace Declaration of +Independence](https://projects.eff.org/~barlow/Declaration-Final.html), +Some parts of the *Declaration* are dated. Many parts seem +fresh, even urgent. + +But in 2015, what stands out most about this document is its +incredible confidence that the Internet is inherently free, and +easily strong enough to announce and defend its own freedom, +against "Governments of the Industrial World, you weary giants of +flesh and steel." + +Well... hindsight is 20/20. But in hindsight, even in 1996 +things were starting to head south. Usenet -- the brain of the +Internet, when the Internet had a brain -- was already +disintegrating under the barbarian invasions. And where is the +WELL these days? (John Perry Barlow is probably still on it.) + +While the Net has certainly scored a point or two against the +State, the State has scored a lot more points against the Net. +If the State wants your domain name, it takes it. If that's +independence, what does utter defeat and submission look like? + +Worse: whatever state tyranny exists, it's obviously dwarfed by +the private, free-market, *corporate* tyrannosaurs that stalk the +cloud today. We can see this clearly by imagining all these +thunder-lizards were *actually part of the government*. "Private" +and "public" are just labels, after all. + +Imagine a world in which LinkedIn, Facebook, Twitter, Apple and +the NSA were all in one big org chart. Is there anyone, of any +political stripe, who doesn't find this outcome creepy? It's +probably going to happen, in fact if not in form. While formal +nationalization is out of fashion, regulation easily achieves the +same result, while keeping the sacred words "private enteprise." + +Reading Barlow's _Declaration_ is a lot like reading the real +Declaration, in an alternate history where Jefferson lost. In +2015, do we still believe in these goals? Arguably, we believe +in them more than ever. We basically live in 1996's nightmare. +We know exactly what to be afraid of. It's already here. + +But we've lost the ability to believe we can *achieve* these +goals. 20 years ago, digital freedom seemed inevitable. Now it +seems impossible. + +## Engineering digital freedom + +Don't panic! This is a simple case of cause and effect. Digital +freedom isn't inevitable. It's also not impossible. It's quite +possible. It just requires *actual engineering work*. + +If you think a result is inevitable, but it's not, you won't do +the work and you won't get the result. So, let's do the work. +Dealing is way better than worrying. 
1996 worried about the +problem; 2016 ought to deal with it. + +A constitution is not a declaration. It's not a list of ideals. +It's more like a bridge -- an actual structure, that fails unless +it stands up to genuine load. A bridge isn't a bridge unless it +works. If you want a bridge, you have to build a bridge. It +doesn't typically happen that you set out to build something +else, but at the end it turns out you've built a bridge. + +The designers of the Internet did not, of course, intend to +implement any of John Perry Barlow's ideals. How close they came +is in a way remarkable. Perhaps it was possible for the +designers of the Internet to build a global, decentralized social +network. They weren't trying to, and they didn't. + +If we want a decentralized social network, we can't do it without +rigorous engineering work. And we can't limit our work to the +world of code. A decentralized network has to work not just +technically -- but politically, economically, and socially. + +Where do we go from here? How do we get back to 1996? Admit +we've failed, and try again. How else? + +## Two axioms + +It's rarely worth arguing over an ugly truth. Either you know +it, or you can't be argued into it. Or it's not true. So it's +better to just give it as an axiom. Two axioms: + +One: the Internet can't be fixed. We *can't* redecentralize the +Internet. It has too many accumulated administrative and +technical misfeatures. + +Two: there is no practical, completely decentralized network. +Government is a human invariant, in the digital world as in the +real world. Even Bitcoin has a central government. + +We can't force you to accept these axioms. We'll just assume +they're true for the rest of this document. This means we're +designing a new, self-governing network on top of the Internet. + +## On digital republics + +The English word *republic* is from the Latin *res publica* - +"public thing." A republic is a government run as a public +trust, without any single point of failure - person or +institution. + +Every (real) democracy is a republic, but not every republic is a +democracy. The distribution of both formal and actual authority +within a republic need not be in any way uniform. (Arguably, it +never is.) + +The design goal of a republic is effective, durable and stable +governance. Humans and servers are inherently fallible. +Therefore this goal cannot be achieved without what humans call +*pluralism* and programmers *redundancy*. Or in other words, +decentralized governance. + +Both the old Internet of the '80s and the distributed social +network built on top of it - Usenet - were very much digital +republics. ICANN still thinks of itself as a republic, but to +some it looks more like a corporation. In previous decades, +influence over collective decisions was more about personal +reputation than corporate authority - the way IETF still works. + +A republic needs a constitution - a set of formal processes that +guides and shapes the real questions of governance, which are +always informal. + +The republic is in a healthy state if its actual power structures +match this constitution. The Soviet Union had a fine +constitution. Its actual authority structures had little to do +with its official structures. + +Again, the digital republic is a machine - if it's not +well-engineered, it won't work. There are three categories of +engineering we have to get right: political, economic, social. 
+ +## Political engineering + +Our ideal network is actually not designed to *be* a digital +republic. It's designed to *become* a digital republic. + +The most basic principle of political engineering is that there +is no one true constitution; the constitution has to fit the +polity. And our polity cannot succeed without changing. And we +care what it ends up as, much more than how it gets there. + +### On young networks + +In a young, small network, digital freedom is irrelevant. There +is no structural conflict of interest between the government and +the users. Everyone in the network is a pioneer, and all +pioneers have the same goal: found the republic. Anyone who +stops believing in the network just leaves. + +The young network is a high-trust society in two ways: trust +between users, and trust in government. The main purpose +of decentralization is to prevent conflict among the distrusting. +Decentralization is superfluous in a high-trust society. + +A young network can't be decentralized *even if it wants to be*. +Consider the early days of Bitcoin. Somewhere between the +initial release and now, there's a point in time T such that +before but not after T, Satoshi himself could have rebooted the +blockchain. + +Not even with a 51% attack - but *just with an email*. He could +have said: I screwed something up, here's a new genesis block. +And everyone would have switched to the new blockchain, at the +mere verbal whim of Emperor Satoshi. + +When Bitcoin was young, it was a centralized network, even though +it had a decentralized constitution. In practice, Satoshi was +above the constitution. Even if he didn't use it, he had an +authority above cryptography. + +### Political engineering in Bitcoin + +There are two layers of sovereignty in Bitcoin. The highest +layer is the choice of blockchain itself - the rule that the +longest chain is best is completely informal. Or to put it +differently, it's the principle that makes Bitcoin Bitcoin and +everything else an altcoin. No math is involved here - just the +agreement of human beings. Or in other words, politics. + +Even the cryptographic layer rests on informal foundations. +The 51% attack on Bitcoin is well-known; any coalition that can +construct one is sovereign. But no such coup has happened, or +will - why? + +Because the coalition is plural. Since each coup supporter would +be acting against its own self-interest by damaging the +reputation of the currency as a whole, the coup requires +self-destructive folly from multiple serious actors. Suicidal +collusion is never a realistic risk - or if it is, nothing can +mitigate it. + +Even if one miner controlled 51%, their incentive against an +attack would be enormous, because they would destroy the +blockchain they captured. On the other hand, not everyone +responds sensibly to incentives. The purpose of a republican +constitution is to eliminate single points of incentive failure. + +Bitcoin is a stable, mature republic - but it is *not* secured +just by cryptography, but also by political engineering. + +### Political engineering in Reddit + +Compare to an unstable, mature non-republic: Reddit. + +2015's Reddit civil war is a dead ringer for one past conflict: +the English Civil War. As Marx and Pareto agree, all major civic +conflict arises when a new social class develops a collective +sense of its right to govern. The danger is most acute when a +governed class senses that the governors do not live up to the +standards of the governed. 
+ +Much as the Puritans looked down on the Cavaliers as immoral, +atheistic fops, the Reddit moderators looked down on the Reddit +staff - or certain parts of it - as faceless corporate drones. +When you've attained real political status in a community, +through hard work and genuine talent, it's contemptible to let +yourself be governed by people without the basic skills to even +pass as competent, much less exceptional, in this community. As +Napoleon said: every regime is safe so long as it is ruled by its +most talented citizens. And if it's not, it isn't. + +But technically, Reddit couldn't just have a French Revolution. +Reddit is an inherently centralized site. It looks like a bunch +of different places, but that's an illusion. It's actually one +big mainframe. The Reddit staff is stuck running this mainframe. +The users have a lot of social capital invested in it. + +If there was a technical mechanism that let the users of Reddit, +collectively and coherently, fork Reddit and take ownership with +a Tennis Court Oath, they certainly would have done so long ago. +The result could only be a digital republic. But they can't, so +Reddit's future is unclear. Man may be born free, but mainframe +guest accounts certainly aren't born free. + +### From monarchy to republic + +In real-world history we see a curious pattern: not only are +republics fairly rare, but every successful republic (from Athens +to Rome to England) started out as a successful monarchy. +Perhaps this is also the right way to build a digital republic? + +Another way to state the political engineering problem that our +new network has to solve: maximize the chance of producing a +mature digital republic. There are two failure cases: failure, +and a mature non-republic. We should prefer the former - "range +safety," as a rocket scientist would say. + +Our conclusion is that a young network is a monarchy (whether +under a BDFL or a faceless corporation), whether it likes it or +not. But the network must be technically designed to *evolve* +into a mature republic. + +And - most critically - the republican evolution *cannot* be +prevented by the monarchical admins. When the evolution has to +happen, the monarchy has every incentive to help it succeed. If +it chooses to interfere instead, it will just get run over. + +Reddit couldn't have a revolution; its code wasn't designed for a +revolution. A new network can and should be designed for just +that. Revolution may not be the ideal way to give birth to a +republic, but it certainly works and it's better than nothing. + +Thus what seems like an optimal political design: the ugly, +centralized, young larva that's designed to molt into a +beautiful, mature, decentralized butterfly. And once mature, the +larva must molt or die -- not keep growing into a gigantic, +man-eating caterpillar of death. + +## Economic engineering + +Economically, a new network should bootstrap. It should be +designed to generate revenue that funds its own development. +Ideally, its operators accept no traditional investment at all. + +A digital-token business is no novelty in the age of Bitcoin. +But a network address space is not at all the same thing as a +digital currency. When we look at current address spaces of +meaningful economic weight - DNS domains, IPv4 addresses, even +Twitter handles - we see not digital money, but digital *land*. + +### On digital land + +Digital land is very different from digital money. People mine +gold, but nobody mines land. 
Transactions in money are common, +fungible, and should involve minimal friction. Transactions in +land are rare, unique, and involve significant friction. And +most important, land has intrinsic utility; money does not. + +For example, Bitcoin needs a blockchain to solve the double spend +problem. A blockchain is very expensive. If we consider mining +dilution as a cost, a Bitcoin transaction costs multiple dollars. +That cost is exacted as a dilution tax on all holders (arguably, +correct accounting in BTC uses a "normalized BTC" unit which is +the fraction of all BTC outstanding), but it remains a cost. + +For certain values of "solved," the double spend problem is also +solved by a trusted escrow agent. For digital money, escrow is +not a workable general solution. For digital land, it may be. +Escrow is certainly orders of magnitude cheaper than a blockchain. + +Digital land in a decentralized system still needs to be owned +cryptographically, like Bitcoin. But as with real property, +it doesn't need to be mathematically impossible to steal digital +property. It just needs to be realistically impractical. If we +can ensure that those with the power to steal lack the motive, +and those with the motive lack the power, our design works. + +For digital land, a blockchain is an unnecessary expense. So +digital land has no mining. But from the Bitcoin purist's +perspective, any altcoin without mining is "100% premined" - ie, +probably a scam. Digital land is not digital currency, but it's +silly to argue over definitions. + +### A moral theory of digital land + +The libertarian philosophy of Murray Rothbard is the normative +belief system of Bitcoin. It's easy to explain digital land in +Rothbardian terms: ownership is on the homesteading principle. + +Property in land, as anything, is owned by those who create it - +which in real land means enclosing and cultivating it. Digital +land is just the same, but creation is a simpler process - and +it never involves conquest and/or genocide of previous owners. +All this is precisely according to Dr. Rothbard. + +Intuitively: if you didn't do any real work to create your +premined altcoin, it's a scam. If not, not. It's not necessary +to appeal to Rothbard to see why this makes sense. + +### Economic dynamics + +Any bootstrapping address space can define a metric which is the +fraction of namespace value recycled into development cost: the +erecycle rate*. In a sense, if the recycle rate is 100%, the +network does not leak economic energy. + +(In a scam, this non-leaked economic energy non-leaks into the +scammer's pocket. In a non-scam, it goes back into the engine. +It's expensive to settle any new America; if that America has +positive general utility, its value when settled should subsidize +the cost of settling it. Or at least, a design that doesn't work +this way is like Newcomen's steam engine, not Watt's.) + +One tradeoff against perfect recycling is the importance of +ownership decentralization. If you own an entire network because +you created it, you can increase the value of the whole address +space by giving blocks away. You may even increase the value of +your own position. + +A monopolized network is not politically healthy. So its +economic value is lower. So -- if the network is properly +designed and structured -- it can be stably demonopolized. The +monopoly power achieved by combining large positions is smaller +than the reputation cost of remonopolization, so centrifugal +force dominates and the system stays decentralized. 
+ +Mining is one way to create initial demonopoly. But if it's not +actually necessary, mining has a recycle rate of zero. In a +blockchain network, mining is a necessary service and pays for +itself. In digital real estate, it would be a bad design. + +The objective of demonopolization isn't necessarily a *fair* +distribution of real estate. Fairness is nice - but from an +engineering perspective, to create the incentive structure of a +true republic, all that's needed is nontrivial decentralization. +Not much is needed to maintain the stabilizing incentives. + +## Regulatory engineering + +There's another kind of "political engineering." Our new network +is a sort of second-level political entity; but it exists within +a first-level entity, the real government. + +Minimizing bad interactions with the real government involves +three simple steps. First, don't look like you're breaking the +law. Second, don't break the law. Third, *really* don't break +the spirit of the law. + +Fortunately, digital land (such as DNS domains or IPv4 blocks) +already exists and is largely unregulated. Or rather, it's +regulated perfectly well by standard property law. + +This isn't just an accident of history. If DNS domains became a +useful way to launder money, or any kind of sink of financial +skulduggery, carders, pedobears, etc - the baleful eye of the +real government would rapidly fall on them. + +It's incumbent on anyone creating a new network of any kind not +just to avoid using it *yourself* for criminal purposes, but to +design it so that *it's not useful* for criminal purposes. A +darknet is not a machine for producing digital freedom. It's the +opposite - an excuse for installing digital tyranny. + +## Social engineering + +Bringing people together is an easy problem for any social +network. The hard problem is keeping them apart. In other +words, the hard problem is *filtering*. Society is filtering. + +A society without filters is a whirling, beige mess of atoms in a +blender. Beigeworld is an inhuman antisociety. A digital +republic is a garden; not only does a garden smell good, but +every flower smells good. An unfiltered network is a sewer. All +sewers have exactly the same smell. + +There are four orthogonal classes of filtering: topic, community, +flavor and quality. Filtering should be orthogonal to content +type: the topic filter "sci.physics" can be a chatroom, a +preprint archive and a streaming video channel. + +### Filtering: topic + +Users themselves want to keep themselves apart in structured +ways. Topic filtering is the most basic. Imagine a Reddit or a +Usenet with only one group. It would consist only of noise. + +For topic filtering, the ideal network has a single, organized +global topic tree (ontology). Call it half Usenet, half +Wikipedia and half the Dewey Decimal System. + +### Filtering: community + +Every topic deserves a community. But not every community is a +topic - a node in the ontology. To put it differently, not every +community should have a global name. The existence of small, +informal, private communities - tribes and microtribes - is +essential to a healthy network. + +The most inhuman form of community filtering is the form in which +"communities" are inferred algorithmically from an unstructured +social graph on a social server. A community is a tribe, not a +proximity cluster. Humans are a tribal species and have +exquisite instincts for interactions in medium-sized groups. 
+ +### Filtering: flavor + +Flavor filtering is the only major filtering class that's poorly +developed in presently deployed systems. + +Suppose you're building an online grocery store. Suppose one +feature of your store is a profile setting where users can mark +that they're vegetarians. + +There is no use in asking vegetarians to shop for meat. At best, +you're trolling them; at worst, you're boring them. A cooking +site should not show them recipes for rack of lamb. Etc. + +More broadly, many forms of discourse have vibrant flavors which +other users are inherently uninterested in tasting. Like topics, +these flavors naturally form trees - some users may naturally +block all sexual content, others just a subset. + +But flavors are not topics. Topic can imply flavor - the bondage +board will have bondage-flavored content. Not all communities +are topical. Not all all topical communities stick to the point +in every single communication. + +A key component of flavor-oriented filtering is the principle +that flavor is marked by the content author, and enforced by +social convention in the community. A well-run community - and +not all communities will be well-run - enforces flavor marking +even among the locally dominant majority. + +Anyone reading this has been subject to speech codes, of one sort +or another, since they were old enough to talk. There's a reason +for this: blasphemy and/or heresy is disruptive and antisocial, +absolutely regardless of its actual intellectual merit. + +If we have a technical way to filter out blasphemy, we don't need +to suppress it with coercive force. Imagine a world in which, +not just in theory but also in practice, you could say anything +you wanted - so long as you marked it as what it was. + +### Filtering: quality + +All other filtering problems are unimportant next to quality. +Any successful digital republic must be in some sense a successor +of Usenet, whose defeat by the barbarians is a matter of history. +Alas, no one really knows how to do decentralized quality +filtering. (Even centralized filtering doesn't work well.) + +Fortunately, a young network is inherently high-quality. +Low-quality content is parasitic. It develops only as a network +matures. There's plenty of time to work on the outer walls while +the network boots up. + +And we can state one social-engineering goal which is not +*sufficient* for barbarian resistance, but perhaps almost +*necessary*. This is *expansion resistance*: the difficulty of +creating a new identity. + +If expansion resistance is zero, anyone can create infinite +numbers of identities; Sybil attacks become trivial. Expansion +resistance is negligible in email; spam filtering works, kind of; +it can save SMTP, it certainly couldn't *create* SMTP. Email has +a lot in common with 14th-century Constantinople. + +Metafilter (which anyone can join for $5) has solid expansion +resistance; it may not be perfect, but it's certainly spamless. +And the grand champion is certainly the '80s Arpanet, where where +creating an identity involved applying to a university or getting +a tech job - effectively, infinite expansion resistance. + +Imagine a genuinely abuse-free global, decentralized network, +where no one had ever heard of a firewall. Usenet was a fragile +flower that could only exist under this glass bell. We can't +go back to the Arpanet, but we have to understand why it worked. + +Broadly, disposable identities and sockpuppets are the enemy of +Internet civilization. 
The principle of one identity per person, +persona or corporation is an absolute principle of netiquette. +Genuine multiple personas exist - it's one thing to split your +own identity between person and persona, name and *nom de plume* +or *nom de drag* - but they're rare, and an easy exception. + +One of the most praised texts in 20th-century political science +is s James Scott's _Seeing Like A State_. Scott points out that +successful governments encourage social structures which are +structurally governable, like a forester planting rows of trees +in straight lines. People today have names like "Carter" because +medieval English barons made their peasants take surnames, just +so their tax databases would have valid primary keys. + +A naive libertarian might call this a bad thing. Simplicity is +not tyranny; simple government is good government, which is the +opposite of tyranny. The simpler its task, the less energy the +government must exert to achieve the same output. Anarchy and +tyranny are cousins; so are liberty and order. + +# Conclusion + +Goals and ideals are different things. An ideal is something you +want. A goal is an ideal, plus a realistic plan to get it. +Goals are more interesting than ideals, don't you think? + +Goals and features are also different things. What are the +features of a network that attempts to achieve these design +goals? In the next installment, we'll look at how our own +network -- Urbit -- measures up to these yardsticks. diff --git a/pub/docs/theory/old.mdy b/pub/docs/theory/old.mdy new file mode 100644 index 0000000000..b6925f1e1e --- /dev/null +++ b/pub/docs/theory/old.mdy @@ -0,0 +1,21 @@ +--- +title: demo.2013 +layout: video +sort: 3 +--- + +# demo.2013 + + + +## DVD commentary + +
+ +#### CY +This is what Urbit looked like two years ago. Yes, we can still do all this stuff. But... + +Urbit in 2013 had the same Nock and pretty much the same Hoon, but Arvo was a prototype. For instance: you'll see a shell and a messenger in this video. The shell is built into the kernel, and so is the talk protocol. We've learned a lot about purely functional operating systems since then... + +
+ diff --git a/pub/docs/theory/part-i.mdy b/pub/docs/theory/part-i.mdy new file mode 100644 index 0000000000..3509beb8d1 --- /dev/null +++ b/pub/docs/theory/part-i.mdy @@ -0,0 +1,512 @@ +--- +title: demo.2015.a +layout: video +sort: 1 +--- + +# demo.2015.a + + + +## DVD commentary + +
+ +#### CY +Hi! This is Curtis Yarvin, writer and director. I also played `~tasfyn-partyv`. I'm here with Galen Wolfe-Pauly - dramaturge, producer, editor, `~firput-miplex`. Hi, Galen! + +#### GWP +Hi! We definitely didn't do this alone. Philip Monk (`~wictuc-folrex`), Anton Dyudin (`~sondel-forsut`) and Henry Ault all helped a lot. Not to mention everyone who has committed to the repo... + +#### CY +This is the urbit demo known officially as ~2015.6.26. Or informally, as "Russian Nock." + +In this commentary track, we'll step you through the film line by line and show you the awesome that is urbit. And of course we'll exchange snarky remarks about the shoot. + +Galen, why "Russian Nock?" + +#### GWP +(laughs). Well, nock is urbit's VM, of course. But it's from a movie, "Russian Ark," which was shot in one take. + +#### CY +Nontrivial for a 13-minute demo of alpha software. + +#### GWP +We actually gave up. We put in a screen clear. + +#### CY +One. At the act break. + +Also, truth in advertising! urbit is alpha and still pretty slow. A lot of the footage is sped up. But almost nothing is actually *faked*. + +And we never used the reload button. Everything that looks reactive is. + +#### GWP +It looks like the input lines are pasted in, but we actually typed them. Then I took the footage and + +#### CY +And edited the keystrokes together. One cut per line. Yes, this workflow was my brilliant idea, thank you. + +#### GWP +Next time, actual tools! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/01.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/01.png) + +##### 00.03 + +#### CY +I'm in my urbit, `~tasfyn-partyv`. This is of course a VM running on my laptop. + +I have an urbit ticket for another planet, `~firput-miplex`. I want to send it to my friend Aldo van Eyck. + +#### GWP +Actually, he's a '50s Dutch architect from Team 10. Maybe we'll start getting email for him! + +#### CY +From my `:dojo` agent, basically the urbit shell, I'm going to run the `|invite` command, which tells `~tasfyn-partyv` to email Aldo the secret he needs to create `~firput-miplex`. + +Via an HTTP API to a mail delivery service, of course. But - the invite is actually the one fake thing in this video. + +#### GWP +The Mailgun API connector wasn't quite together yet. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/02.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/02.png) + +##### 00.07 + +#### CY +Via `:talk`, I get a success message from `|invite` - the 55th message `~tasfyn-partyv` got in its lifetime. + +#### GWP +Obviously this isn't the real `~tasfyn-partyv`, we're on + +#### CY +A test network. Because we're still alpha. + +Then I typed `^X` to switch from `:dojo` to `:talk`. + +#### GWP +These two applications are sharing your command line. + +#### CY +You multiplex the command line with ^X. It's like switching windows. But everyone's output + +#### GWP +Gets mixed together and scrolls up. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/03.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/03.png) + +##### 00.14 + +#### CY +In `:talk`, I set my target base to `~firput-miplex`. + +Then send you a `@` message, like irc /me. + +#### GWP +Bear in mind, `~firput-miplex` doesn't *exist* yet. + +#### CY +That's right. Suppose you sent someone email, but to an account they haven't created, on a server whose domain isn't registered. 
+ +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/04.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/04.png) + +##### 00.17 + +#### CY +Obviously I stole this great error message from NFS. + +#### GWP +Obviously. + +#### CY +In urbit your name, or *base*, is your network address. + +We're sending `~firput-miplex` packets but not hearing back. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/05-a.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/05-a.png) + +##### 00.22 + +#### GWP +Aldo gets the email with his urbit ticket. He can't resist. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/05.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/05.png) + +##### 00.37 + +#### GWP +He builds urbit on his macbook and runs it. + +#### CY +He probably should get that secret out of his bash history. + +#### GWP +That's what Sergey Aleynikov said to himself! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/06.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/06.png) + +##### 00.43 + +#### CY +Ignore the boot messages behind the curtain. But look - + +#### GWP +I got your message. Definitely not how email works. + +#### CY +It's the way urbit networking works. The transport layer has no concept of connectivity state. it just waits. + +#### GWP +Nobody who watches our film notices this. Nobody. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/07.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/07.png) + +##### 00.45 + +#### CY +"Is your neighbor" means you've exchanged keys, basically. + +#### GWP +Why do we see it twice? + +#### CY +I have no idea. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/08.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/08.png) + +##### 01.26 + +#### GWP +I go to your `tasfyn-partyv.urbit.org` url. But there's nothing there. Error page. What? + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/09.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/09.png) + +##### 01.48 + +#### CY +I use a shell terminal to create the markdown file, edit it in vim, save it, and - + +#### GWP +And my browser immediately loads it as a web page. + +#### CY +Your error page is long-polling my server, which tracks dependencies for every page, good or bad. + +When I saved the markdown file, my urbit was watching the unix directory and committed my save as a change. That change event goes to the filesystem, `%clay`. + +`%clay` applies it and sees that the build system, `%ford`, is watching this subtree. `%ford` gets the notification and reports a dependency change to the web server `%eyre`, which returns 200 on the long-poll. + +Your browser then requests the new version of this url. `%eyre` forwards the request to `%ford`, which figures out that it can build an appropriate response by converting the markdown file to an HTML tree, then injecting the reload script, then printing the HTML. + +#### GWP +This isn't even a dev tool. It works in production. + +#### CY +Not that it scales! But it works fine behind an nginx cache. + +#### GWP +If we edit this commentary transcript right now, it'll change on the reader's screen. 
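+
+(An aside for readers following along: the pipeline Curtis just
+described - `%clay` noticing the commit, `%ford` re-deriving
+anything whose dependencies include it, `%eyre` completing the
+long-poll so the browser re-fetches - is easy to caricature
+outside urbit. A minimal sketch in Python, with invented names,
+of just the dependency-and-long-poll bookkeeping:)
+
+    # every built page records what it read; a change to any of those
+    # sources releases that page's long-polls.  names are illustrative.
+    from collections import defaultdict
+    import threading
+
+    class Builder:
+        def __init__(self, render):
+            self.render = render              # url -> (html, set of source paths)
+            self.deps = defaultdict(set)      # source path -> urls built from it
+            self.waiters = defaultdict(list)  # url -> pending long-polls
+
+        def build(self, url):
+            html, sources = self.render(url)
+            for src in sources:
+                self.deps[src].add(url)
+            return html
+
+        def long_poll(self, url):
+            ev = threading.Event()
+            self.waiters[url].append(ev)
+            return ev                         # client blocks on ev.wait()
+
+        def commit(self, changed_path):       # a save lands in the filesystem
+            for url in self.deps.pop(changed_path, ()):
+                for ev in self.waiters.pop(url, ()):
+                    ev.set()                  # 200 on the long-poll; browser re-fetches
+
+(The real `%ford` tracks every time-versioned input to the build,
+not just the page source - hence the "change the markdown parser
+and you reload" remark below - but this is the shape of the trick.)
+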
+ +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/10.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/10.png) + +##### 02.14 + +#### CY +Here I go and edit the same file again, and we see your refresh script do its work. Ho hum, boring already. + +#### GWP +Imagine living in a world where everyone assumed it should just work this way - why wouldn't the browser update? Why would you not track dependencies on the page build? + +#### CY +Of course it's not just the page content we're tracking. If I changed the markdown parser, you'd reload again. It's everything local and time-versioned used in the build. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/11.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/11.png) + +##### 02.34 + +#### CY +Here I send you the magic long-form url that lets you specify version numbers directly. So you can see the old version of the poem. + +#### CY +There are two schools of web server url handling. There's the open-minded school that let you parse a path your own way, and the closed-minded school that hardcodes its own semantics into the url. urbit's - + +#### GWP +urbit's is the closed-minded school. + +#### CY +Exactly. Your url goes mostly undigested to the `%ford` build system. If these aren't the urls you want the user to see, wrap it in an nginx rewrite rule. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/12.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/12.png) + +##### 02.49 + +#### CY +Now the same thing but one directory up. + +#### GWP +Or `++arch` as we say in urbit. + +#### CY +Imagine a unix inode, except if every inode was a directory plus an optional file. But also, if unix was git. + +#### GWP +Or `%clay` as we say in urbit. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/13.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/13.png) + +##### 02.53 + +#### GWP +And the arch autoupdates too. Okay, we get it! + +#### CY +What's really going on here is that the whole subtree of urls under `/home/tree` is being rendered by one `tree.hoon` file, which defines a function that maps the url suffix to an HTML document. + +This `tree` function is pretty simple - it just uses the url suffix, `/pub/verse/bunting`, as a path in the filesystem `%clay`. + +Since there's no file on that arch, we just show the list of children. Just like Apache `public_html`. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/14.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/14.png) + +##### 03.15 + +#### CY +Here we're in the root of the urbit doc tree. + +#### GWP +Don't look too hard at the fine print! + +#### CY +Knuth was wrong about the root of all evil. It's actually premature documentation. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/15.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/15.png) + +##### 03.36 + +#### GWP +Definitely don't throw away your Wordpress just yet. + +#### CY +Galen wrote and designed this tree renderer. It's actually not bad for the amount of work he put in. + +#### GWP +Well, me and Anton mostly. It's a good start. + +#### CY +We should have demoed the navigation controls at the top, which let you move up or to the next sibling. There's no excuse for tree-structured content without uniform tree navigation affordances. 
+ +#### GWP +Jakob Nielsen is shaking his head somewhere. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/16.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/16.png) + +##### 03.47 + +#### CY +Note that you're now surfing `firput-miplex.urbit.org`. That's your own urbit, of course. + +You're using an application, not just a functional renderer. So you need to log in with an identity. The default is you. And your password is that secret you got in the email - your ticket. + +#### GWP +We're not demoing single sign-on here, but we do it. + +#### CY +If I logged into `~firput-miplex` as `~tasfyn-partyv`, it would redirect me to `tasfyn-partyv.urbit.org` with a challenge. If I was already logged into my own urbit, logging in to yours would be automatic. + +We set a cleartext `*.urbit.org` cookie with your urbit base, so within `urbit.org` login is generally automatic. + +#### GWP +Obviously this login system is application-independent. It's a part of urbit, not a part of `:talk.` + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/17.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/17.png) + +##### 03.49 + +#### CY +Isn't that pretty? + +#### GWP +Ugh. Alpha is the name of the game around here. It's a start. + +#### CY +Galen went to art school or something. He has high standards. + +Notice of course that we see the same message backlog in the web ui and the console. Obviously it's the same app. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/17-a.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/17-a.png) + +##### 04.15 + +#### GWP +Nice little recovery there. Of course, in "Russian Ark" + +#### CY +They'd have reshot the whole movie. They had standards. + +Anyway, `:talk` is really our flagship app. It weighs in at almost 2000 lines of code counting the protocols. It's a genuine distributed system and it's being used right now. + +To make a long story short, `:talk` is basically Usenet slumming as a chat server. Well, it could use a few more bells and whistles. But the bones of NNTP are there. A chat line is just one type of message... + +#### GWP +Next on our agenda, animated cat gifs. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/18.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/18.png) + +##### 04.20 + +#### CY +We actually do support more than four urbits in a chatroom. + +#### AD +If there's no clock skew. + +#### GWP +The clock skew bug! That had to happen when we were filming. + +#### CY +NTP works, except when it doesn't. Fortunately Philip was up to the challenge. + +#### PM +The bug was in my code. On the other hand, you put it there. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/19.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/19.png) + +##### 04.34 + +#### GWP +This whole bitcoin sequence is a rendering disaster. On console and browser alike. + +#### CY +Also it makes no sense. Who types in their bitcoin address? More thinking, comrade, will have to go into this exercise. + +#### GWP +Who else has a language with syntax for a bitcoin address? + +#### CY +We need a syntax for bitcoin amounts, too. That argument is a string. Lame. + +#### GWP +At least the example isn't faked. Well, unless you count using the coinbase sandbox as "fake." + +#### CY +Arguably an exchange API is a silly way to send a payment. 
We should run a bitcoin core daemon with an urbit gateway. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/20.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/20.png) + +##### 04.55 + +#### CY +Yes, we actually tweeted to the real twitters here. + +#### GWP +Maybe there should be a twitter sandbox. + +#### CY +I know a lot of people who could use a twitter sandbox! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/21.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/21.png) + +##### 05.14 + +#### CY +Here I start lecturing you a bunch. Anyway, this argument is a little cryptic. Of course an evil or compromised host can look at any of your data on the service, barring some kind of breakthrough in efficient homomorphic encryption. And this is true whether you're a VM or a database row. The Linode hack, for instance, was an attack on cloud VMs. + +But it still makes a lot more sense to keep valuable secrets in a cloud-hosted VM than a cloud-hosted application database - because there are all sorts of ways that database can be queried. Nobody queries a VM. + +When the line between hosting provider and hosted computer is drawn at the edge of the VM, it's a very precise line. It would be outrageous if amazon was peeking into AWS VMs, stealing their data and using it to sell more ads. So logically you're not safer, but in practice you are. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/22.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/22.png) + +##### 05.33 + +#### CY +Of course it looks better if you unlimit coredumpsize. So you actually, uh, dump core. Like the script says. + +#### GWP +Curtis went to unix school or something. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/23.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/23.png) + +##### 05.41 + +#### CY +Too many printfs! But you see that there's only one level of storage in urbit. At least logically, all data persists. + +#### GWP +My iPhone pretends to work this way. But I'm not sure it does. + +#### CY +Practically, the way we create this abstraction is the way all databases work: a checkpoint and a log. Logging is easy - there are all kinds of great cloud tools for it. + +If you look at the printfs, you have a checkpoint at event 9616. but the log goes up to 9624. You load this checkpoint, then rerun the last 8 events. + +The only rule is that you can't execute the actions caused by an event until you've logged the event. For example, if an incoming packet causes an outgoing packet, you can't send the outgoing packet until you've logged the incoming. + +#### GWP +So every event is a transaction. Watch out, Oracle! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/24.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/24.png) + +##### 05.52 + +#### GWP +I'm not sure we can get away with this "NoDB" meme. Didn't you just say urbit *is* a database? + +#### CY +A foolish consistency is the hobgoblin of little minds. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/25.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/25.png) + +##### 06.41 + +#### CY +A lot of this demo is sped up. But that part is *really* sped up. Anyway, in case it's not obvious what you did, + +#### GWP +I deleted my checkpoint, than restarted from just the log. 
+ +#### CY +And arrived at exactly the same bit-for-bit state. + +#### GWP +You know, cosmic rays could cause big problems with urbit. + +#### CY +I'm aware of that. It's a real issue with any kind of repeatable computing. You depend on perfect execution. Certainly, ECC memory is a must. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/26.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/26.png) + +##### 06.54 + +#### GWP +And... here we see a massive demo fail. + +#### CY +Oh, hardly. First, the nock spec was somehow not in `%clay`. So we had to add it back in from another window. Second, you said `+cat` in `:talk`, so you `:talk`ed the cat command rather than `:dojo`ing it. + +And third, your browser hung up and reconnected. Just typical printf noise from alpha code. + +#### GWP +I wonder if anyone actually noticed that. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/27.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/i/27.png) + +##### 06.56 + +#### GWP +And... there's nock. How did you come up with that, anyway, Curtis? + +#### CY +Gosh, I barely even remember. I started with something much bigger and kept pulling things out. I guess. + +#### GWP +Sort of the Kevin Herbert strategy? + +#### CY +(laughs.) Kids, definitely don't try this at home. + +#### GWP +Thanks for watching our film! Or the first half, anyway... + +
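+
+(A postscript on the 05.41 sequence, for anyone who wants the
+checkpoint-and-log rule in something more concrete than printfs.
+This is a sketch with invented names, not urbit's event loop; the
+one invariant it tries to show is that an event's effects are
+only released after the event has been logged, so that replaying
+checkpoint plus log always lands you in the same state.)
+
+    # single-level storage as checkpoint + log.  the rule: never emit the
+    # effects of an event before the event itself is durably logged.
+    import json, os
+
+    class Pier:
+        def __init__(self, state, apply_event):
+            self.state = state                # the in-memory image
+            self.apply_event = apply_event    # (state, event) -> (state, effects)
+            self.log = []                     # append-only, durable in real life
+
+        def poke(self, event):
+            self.log.append(event)            # 1. log the cause...
+            self.state, effects = self.apply_event(self.state, event)
+            return effects                    # 2. ...only then release the effects
+
+        def checkpoint(self, path):
+            with open(path, "w") as f:
+                json.dump({"upto": len(self.log), "state": self.state}, f)
+
+        @classmethod
+        def replay(cls, path, log, apply_event):
+            snap = (json.load(open(path)) if os.path.exists(path)
+                    else {"upto": 0, "state": None})
+            pier = cls(snap["state"], apply_event)
+            pier.log = list(log[:snap["upto"]])
+            for event in log[snap["upto"]:]:  # e.g. rerun events 9617..9624
+                # effects were already released before the restart; discard them
+                pier.state, _ = apply_event(pier.state, event)
+                pier.log.append(event)
+            return pier                       # bit-for-bit the same state
+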
diff --git a/pub/docs/theory/part-ii.mdy b/pub/docs/theory/part-ii.mdy new file mode 100644 index 0000000000..f495122173 --- /dev/null +++ b/pub/docs/theory/part-ii.mdy @@ -0,0 +1,462 @@ +--- +title: demo.2015.b +layout: video +sort: 2 +--- + +# demo.2015.b + + + +## DVD commentary + +
+ +#### CY +Hi, DVD viewers! Once again, it's Curtis Yarvin and Galen Wolfe-Pauly with the second half of "Russian Nock." + +#### GWP +Would you like to play a game of global thermonuclear war? + +#### CY +Not today! But I'm always up for tictactoe. + +In this section of our epic major motion picture, we build a simple Urbit appliance: tictactoe. The point of the exercise is just that it's easy to build distributed social applications in Hoon and Arvo, our language and OS. At least, it's easy once you know Hoon. + +#### GWP +Hoon is actually much easier to learn than it looks. But why, Curtis, is it easy to build social distributed apps in Urbit? + +#### CY +Because Urbit solves all the hard problems for you. Identity, of course. But also idempotence, transactionality, protocol definition and validation, publish/subscribe... + +#### GWP +Urbit certainly sounds like something that would work for idempotence. + +#### CY +It does! Urbit. It's what plants crave. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/01.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/01.png) + +##### 00.56 + +#### CY +So we cleared the screen, which means we got to annihilate the universe. + +#### GWP +We shot this second chapter in a new test universe. + +#### CY +That's why we become neighbors again. We just made these urbits. + +#### GWP +I hated the text on screen at the end of the last take. Very marketing. + +#### CY +But it does include the instructions for what you just did. + +First, you synced my `%demo` desk - desk is Urbitese for "branch" - into your `%demo` desk. That both copies over the current `%demo` state, and subscribes to any future changes. + +Second, you started the `%taco` app from your `%demo` desk. + +#### GWP +But it didn't work. + +#### CY +Neither of us has a `%demo` desk. And I haven't written `%taco` yet. But (spoiler) it will work. + +Here's why "it doesn't matter what order you do things in" (for certain values of "order" and "thing"). In a distributed system, state changes whenever possible should be knowledge events. If you learn A and B, it matters not whether you learn A before B or B before A: what you know is AB. Any data model that works this way is inherently a CRDT. If you know what that is. + +#### GWP +I don't. + +#### CY +A conflict-free replicated data type. It's what plants crave. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/02.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/02.png) + +##### 01.01 + +#### CY +In `%clay`, Urbit's revision-control system, your working tree is actually a desk (again, Urbitese for "branch"). + +From Urbit you can mount synced Unix directories that work like Dropbox. Urbit monitors your directory tree with `inotify()`, catches any changes, and commits them as Urbit events. So you don't need some new Urbit editor to develop for Urbit. You can keep using the best editor in the world, which is obviously vim. + +#### GWP +Or Emacs. We also have an Emacs mode for Hoon. Is that what it's called? A mode? + +#### CY +Is it? I wouldn't know. Anyway, you definitely want syntax highlighting in Hoon, though I eschew it to keep my hand strong. + +#### GWP +Thank you for that, Curtis. + +#### CY +Anyway, to make changes in `%demo`, the general practice is to make a `%demo-work` desk whose history is an unstructured series of edits. Your merges from `%demo-work` to `%demo` are the equivalent of git commits. 
+ +(Note that we don't explicitly mount Urbit folders to Unix directories here. Not that we don't need to, just that we shot this with an older version of Urbit that automounted.) + +#### GWP +What's the equivalent of a commit message? + +#### CY +As part of your merge change, prepend a line to a commit log on the target. Why should the log be a built-in feature of the filesystem? That's so git. + +#### GWP +Does the `|merge` command actually do that? + +#### CY +No, but it should! + +Speaking of commands, we did a `|merge` here and a `|sync` before. Again, `|sync` sets up a flow, `|merge` is a one-time copy. So future updates of my `%demo` won't flow into `%demo-work`, but they will flow into *your* `%demo`. Because I merged and you synced. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/03.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/03.png) + +##### 01.11 + +#### CY +It looks like I'm quitting Urbit and going into an editor. I'm actually just switching windows. I could quit Urbit, of course -- it's a database, after all. But I don't have to. + +#### GWP +I think that's just bad direction. + +#### CY +It's actually bad editing. Anyway, here we build the basic state model of a simple tictactoe app. This is a structure file, so it goes in `/~tasfyn-partyv/demo-work=/sur/taco/hoon` (in Urbit) and `demo-work/sur/taco.hoon` (in Unix). + +I honestly think anyone could stare at this file for a couple of minutes and realize what it's doing. The state of a tictactoe game is: the board, and whose turn it is. The turn `who` is a boolean. The board we store in two bitmaps, `box` for X and `boo` for O. Hoon is very good at bit fiddling and its arrays are just lists, so our boards are just atoms (unsigned integers). + +#### GWP +I still think `,[x=@ y=@]` is a really ugly syntax. + +#### CY +I think you're right. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/04.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/04.png) + +##### 01.23 + +#### CY +Then we type in `/===/lib/taco/hoon`. This is the "business logic" of tictactoe. I feel it compares well to the same problem solved in other languages. + +I won't go into super deep detail about this code, but you see a couple of cores `++bo` (for board bitmaps) and `++go` (for the `++play` structure we just defined). If Hoon had C++ classes, these would be classes. You can see the getters and setters getting and setting. There's also a nice nested core within `++go`, `++at`. Hard to find anything quite like a nested core in normal Earth languages. + +I actually typed in these files, by the way. By hand. Isn't that lame? I think there are a couple of errors. Of course, to not break the take, I switched to a different window and copied them in by hand. + +#### GWP +Then I created the illusion of superhuman flowing code. In the editing room. Have I told you how many edits that took? + +#### CY +At least once. Anyway, I want to emphasize something else, which is that although (as someone who reads Hoon) this is perfectly readable Hoon, it's not quite what real code looks like. The formatting in particular has been pretty seriously golfed. This looks like good Hoon to me, but it's insanely overgroomed like a French poodle. You'd never do this much formatting work if it wasn't a demo. + +#### GWP +Honestly, I think normal Hoon looks better. + +#### CY +Honestly, it's all about the line count. 
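+
+(For readers who don't want to squint at the screenshot: the
+state model being described really is tiny. Here's a rough
+Python rendering of the two-bitmap board - `box` for X, `boo`
+for O, one bit per square - with the sort of operations `++bo`
+and the `++play` structure provide. The bit layout and helper
+names here are ours, not the ones in the demo code.)
+
+    # a 3x3 board as two 9-bit integers: bit (3 * y + x) set means that
+    # side owns the square.  roughly what hoon does with plain atoms.
+    WINS = [0b000000111, 0b000111000, 0b111000000,    # rows
+            0b001001001, 0b010010010, 0b100100100,    # columns
+            0b100010001, 0b001010100]                 # diagonals
+
+    def bit(x, y):
+        return 1 << (3 * y + x)
+
+    def won(board):
+        return any(board & w == w for w in WINS)
+
+    class Play:                                # as in sur/taco: turn + two boards
+        def __init__(self):
+            self.who, self.box, self.boo = True, 0, 0     # True: X to move
+
+        def move(self, x, y):
+            if (self.box | self.boo) & bit(x, y):
+                return False                   # square already taken
+            if self.who: self.box |= bit(x, y)
+            else:        self.boo |= bit(x, y)
+            self.who = not self.who
+            return True
+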
+ +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/05.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/05.png) + +##### 01.41 + +#### CY +Then it takes us 82 lines of code to create the actual application core. This seems like a lot. Actually, there's a lot of code here that other multi-console apps could duplicate, so maybe there should be a library. + +This version of `%taco` is a multiplayer game, of course, but kind of a trivial one. It doesn't have its own network protocol. Imagine if you were writing a Unix tictactoe server, and it just worked by running the telnet protocol and letting clients telnet into it. + +The Urbit equivalent of the telnet protocol is the `%sole` system, which the console driver (the `%hood` appliance) uses to talk to command-line appliances foreign and domestic. `%sole-action` messages get poked forward; `%sole-effect` messages come back in a subscription. + +`%sole` is actually quite a fancy command-line model. It treats the input line as multi-writer shared state and manages edits with OT. So the app can reject invalid input as it's being typed, do tab completion, etc. + +#### GWP +Overtime? + +#### CY +Operational transformation. It has electrolytes. Anyway, what the code in this file is doing: parsing the command line (Hoon has pretty good combinator parsers), applying updates, detecting changes, updating subscribers. + +Tictactoe is a two-player game, of course, but any number of people could log into `%taco`. One player is the server; everyone else can play the other side. + +#### GWP +But are the parsers monadic? Does Hoon have monads? + +#### CY +Shh! We try not to use that word. If it helps you to think in category theory, great. Category theory is a theory of everything, including Hoon. For most people it's easier to just learn Hoon. + +#### GWP +Sometimes I wonder if it's just that you're not smart enough to learn category theory, Curtis. + +#### CY +(in Maryland accent) I'm just a country boy but I'm sure I could figure it out real good if I had the need. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/06.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/06.png) + +##### 02.03 + +#### CY +So what happened here is that when I merged my `%demo-work` desk into my `%demo` desk, your `%demo` desk was synced to it and got a subscription update. Then, your urbit was already configured to be running the `%taco` appliance from your `%demo` desk. So when it noticed that the source code existed, your urbit started the app with its default state. + +#### GWP +Why am I not responding, still trying? + +#### CY +That's the old network layer. There's still a bunch of 2013 code in there. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/07.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/07.png) + +##### 02.14 + +#### CY +Here I telnet into your `%taco` appliance. Or more like ssh, because it's secure. + +#### GWP +Secure? Really? + +#### CY +Well, it's encrypted. That means it's probably secure. It's certainly *designed* to be secure. + +#### GWP +I'm sure the NSA is very worried. + +#### 02.43 + +#### CY +Now this is a total party trick. Instead of editing through `%demo-work` like a responsible developer, I edit `%demo` directly. When I save the file, without any other command, the effect cascades all the way through and hot-reloads your `%taco` on your urbit, changing the 'X' icon to 'Z' and back again. 
+ +Moreover, if you notice, I'm not even editing the `%taco` app core. I'm editing a library it depends on. Because dependencies. + +#### GWP +Would it be a good idea to do this in production? Also, what happens if you make a change that doesn't compile? + +#### CY +No. If only because it leaves a gnarly edit history. Also -- we'll see that in a little bit. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/08.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/08.png) + +##### 03.08 + +#### GWP +Marketing copy in a demo really isn't appropriate. + +#### CY +I know. It is kind of true, though. + + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/09.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/09.png) + +##### 03.11 + +#### CY +Because I made those edits directly in `%demo`, I need to merge back into `%demo-work`. Or... + +#### GWP +Can't the filesystem handle a trivial three-way merge like that? + +#### CY +I think so! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/10.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/10.png) + +##### 03.19 + +#### CY +For the new version our game state gets more sophisticated. (I don't know why I said "protocol.") Now it knows the players and the audience. + +Of course we need to be able to upgrade without losing our state. So we'll have to write an adapter. Which is pretty easy, because we just added a couple fields. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/11.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/11.png) + +##### 03.28 + +#### CY +Now we need to be able to print the new information for the prompt. Some of this code is getting pretty golfy -- I really can't endorse the style of `++vew`. + +Also, you probably missed it above, but we added a board-to-JSON translator in `++bo`. We can turn a Hoon bitfield into a JSON array in one line of code. It's not even a golfy line. + +#### GWP +`a/(turn (gulf 0 9) |=(@ b/(bit +<)))`. Now that's not printing the JSON, just making the noun that we'll print. + +#### CY +I assert that anyone who can learn JS can learn Hoon. On the other hand, I don't know JS. + +#### GWP +Just don't use the word "monkey" in the same sentence as "front-end," and we'll be fine. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/12.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/12.png) + +##### 03.35 + +#### CY +This is an actual protocol structure - a "mark." It's like a MIME type, except it's actually a type. + +`%taco-move` is just a coordinate `point`, a pair of integers (defined in `sur/taco.hoon`). + +#### GWP +It's actually really nice to have your network messages auto-validated and stuff. + +#### CY +Also auto-translated -- as you see, there's an arm for turning `%json` into `%taco-move`. + +#### GWP +That's convenient for making moves from a browser client, obviously. + +#### CY +Not that the app knows it's dealing with a browser. It thinks the browser is another urbit. The HTTP server translates automagically. + +#### GWP +Because the mark has that translator arm. Good times! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/13.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/13.png) + +##### 03.43 + +#### CY +Another mark: moves go forward, updates go backward. 
So we have to translate `%taco-update` (the whole game state) into `%json`. + +#### GWP +Again we're just building a JSON-shaped noun. The web server, `%eyre`, will actually print as text. + +#### CY +Because that's a great thing to send over the network. Text. + +#### GWP +We should eventually have a socket driver that doesn't do this. Right now it's all long-poll / Comet. + +#### CY +Please don't say "socket" when you mean "websocket." It reminds me of how old I am. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/14.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/14.png) + +##### 03.51 + +#### CY +Is this a special template language? No, it's actually Hoon. It has custom syntax for building XML-shaped nouns. So we don't need a template language. + +#### GWP +You're so old, you still believe in XHTML. + +#### CY +HTML5 was a terrible blow. But at least I'm not so old, I believe in SGML. + +#### GWP +Obviously this "HTML" file is just a shell for a JS app. As you see, it loads jQuery from a CDN and a bunch of resources from its own urbit. Including my own `urb.js`. + +#### CY +Front-end frameworks are important. We're all quadrumana at heart. + +#### GWP +I'll ignore that remark. In any case, an `urb.js` client application POSTs commands to the server and gets back a stream of state diffs (Comet, basically). I used jQuery here but this design works very well with React. + +#### CY +It's also exactly the way an urbit client works, so the server doesn't have to know it's talking over JSON to a browser. Basically the web app doesn't even know it's a web app. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/15.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/15.png) + +##### 04.02 + +#### GWP +Sometimes a client is just a client. + +#### CY +I don't think there's any need to code-golf our JS. Any front-end -- + +#### GWP +Any front-end *engineer* will have no problem programming for `urb.js`. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/16.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/16.png) + +##### 04.07 + +#### CY +If I edit this CSS, will the browser get a change event and reload it? + +#### GWP +If the JS page is written right, I think so. We normally listen to the page source and its dependencies and refresh on any change. More long-polling. + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/17.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/17.png) + +##### 04.25 + +#### CY +We really screwed up the shoot here. We were going to do a retake. + +I did two things wrong: I merged in the wrong direction (nothing the system can't handle). And I upgraded every `%taco` file *except the app itself*. + +#### GWP +But then we thought: why not show how resilient the system is? Also it was getting pretty late. + +#### CY +So this is what happens when you get an update and it doesn't compile. It's what should happen. You see an error message and nothing else happens. Your app is fine. + +#### GWP +As easy as that! + +[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/18.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/18.png) + +##### 04.34 + +#### CY +This sequence moves a little fast. But as you see, we're not changing much about the application file proper when we give it its own network protocol and make it a web app. 
+
+After all, translating generic console effects into application state changes is a fairly interesting problem. From our own protocol, no parsing is needed. Likewise, the web app is just pushing raw commands and subscribing to state changes.
+
+Now, this version of tictactoe is still really a client-server system, not peer-to-peer. Asymmetric, not symmetric. But the complexity of distributed state coupling isn't much different.
+
+#### GWP
+Now, you sound like you work for Oracle. Or maybe the Defense Department. Whatever, does the game actually work?
+
+#### CY
+It works for the take we recorded!
+
+[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/19.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/19.png)
+
+##### 04.40
+
+#### CY
+I think I had to kick it with an event to get it to change the prompt after the reload. That's supposed to be automatic.
+
+Note that the reason *my* prompt changes is that the new app file, on my urbit, has propagated to your urbit and reloaded the app, which then has streamed the new prompts to all subscribing clients, including both my urbit and yours.
+
+Of course, our state adapter worked and our game hasn't been interrupted.
+
+#### GWP
+Is this schema evolution? Is it typed? Is it ACID? How long can application state live this way?
+
+#### CY
+Yes, yes, yes, and as long as it wants. Generally you do want to export core data to the filesystem so that other apps can see it. The filesystem is not just revision-controlled but also typed, so this isn't too awful.
+
+[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/20.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/20.png)
+
+##### 04.57
+
+#### GWP
+So you're playing against me on my own urbit. You're talking to my `:taco` appliance from your urbit (which is also upstream of my installation), through the console protocol. I'm logged into the same appliance as a web client, authenticated as myself.
+
+#### CY
+Pretty much. Also, this is a really nice-looking tictactoe board and it's a shame our game is so short. We even grey out the board when it's not your turn.
+
+Note that the client UI even has a presence widget. This isn't just tictactoe. It's *social* tictactoe.
+
+[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/21.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/21.png)
+
+##### 05.19
+
+#### CY
+Did you have to just win like that?
+
+#### GWP
+I think I panicked. Does it have to clear the board that fast? Shouldn't it show my move?
+
+#### CY
+It's true. This isn't the world's most polished tictactoe app. On the other hand, we do have an operating system to finish.
+
+[![](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/22.png)](https://storage.googleapis.com/urbit-extra/preview-1/screenshots/ii/22.png)
+
+##### 05.22
+
+#### GWP
+We should have credits here.
+
+#### CY
+We should! But since we don't, I'd like to thank Joan Torres for this awesome soundtrack, which he put together on like two days' notice. In regular life Joan is actually a world-class jazz bassist. He also writes Urbit jets in C. Not sure how that's possible, but there you go.
+
+Unfortunately we borrowed the audio for part I (it's Ulrich Schnauss, _Knuddelmaus_), but we'll fix that as soon as possible!
+
+
diff --git a/pub/docs/theory/whitepaper.mdy b/pub/docs/theory/whitepaper.mdy new file mode 100644 index 0000000000..5624232845 --- /dev/null +++ b/pub/docs/theory/whitepaper.mdy @@ -0,0 +1,2346 @@ +--- +title: Urbit whitepaper +sort: 0 +--- + +Urbit: an operating function +============================ + +
This is Urbit whitepaper DRAFT 40K. Some small details +remain at variance with the codebase.
+ +Abstract +======== + +Urbit is a clean-slate, full-stack redesign of system software. +In 25K lines of code, it's a packet protocol, a pure functional +language, a deterministic OS, an ACID database, a versioned +filesystem, a web server and a global PKI. Urbit runs on a frozen +combinator interpreter specified in 200 words; the rest of the +stack upgrades itself over its own network. + +Architecturally, Urbit is an opaque computing and communication +layer above Unix and the Internet. To the user, it's a new +decentralized network where you own and control your own +general-purpose personal server, or "planet." A planet is not a +new way to host your old apps; it's a different experience. + +Download +-------- +[As `.md`](https://storage.googleapis.com/urbit-extra/preview-1/whitepaper.md) +[As `.pdf`](https://storage.googleapis.com/urbit-extra/preview-1/whitepaper.pdf) + + + +
+ +Objective +========= + +How can we put users back in control of their own computing? + +Most people still have a general-purpose home computer, but it's +atrophying into a client. Their critical data is all in the +cloud. Technically, of course, that's ideal. Data centers are +pretty good at being data centers. + +But in the cloud, all users have is a herd of special-purpose +appliances, not one of which is a general-purpose computer. Do +users want their own general-purpose personal cloud computer? If +so, why don't they have one now? How might we change this? + +Conventional cloud computing, the way the cloud works now, is +"1:n". One application, hosted on one logical server by its own +developer, serves "n" users. Each account on each application is +one "personal appliance" - a special-purpose computer, completely +under the developer's control. + +Personal cloud computing, the way we wish the cloud worked, is +"n:1": each user has one logical server, which runs "n" +independent applications. This general-purpose computer is a +"personal server," completely under the user's control. + +"Personal server" is a phrase only a marketing department could +love. We prefer to say: your *planet*. Your planet is your +digital identity, your network address, your filesystem and your +application server. Every byte on it is yours; every instruction +it runs is under your control. + +Most people should park their planets in the cloud, because the +cloud works better. But a planet is not a planet unless it's +independent. A host without contractual guarantees of absolute +privacy and unconditional migration is not a host, but a trap. +The paranoid and those with global adversaries should migrate to +their own closets while home computing remains legal. + +But wait: isn't "planet" just a fancy word for a self-hosted +server? Who wants to self-host? Why would anyone want to be their +own sysadmin? + +*Managing* your own computing is a cost, not a benefit. Your +planet should be as easy as possible to manage. (It certainly +should be easier than herding your "n" personal appliances.) But +the benefit of "n:1" is *controlling* your own computing. + +The "n:1" cloud is not a better way to implement your existing +user experience. It's a different relationship between human and +computer. An owner is not just another customer. A single-family +home is not just another motel room. Control matters. A lot. + +Perhaps this seems abstract. It's hard to imagine the "n:1" world +before it exists. Let's try a thought-experiment: adding +impossible levels of ownership to today's "1:n" ecosystem. + +Take the web apps you use today. Imagine you trust them +completely. Imagine any app can use any data from any other app, +just because both accounts are you. Imagine you can "sidegrade" +any app by moving its data safely to a compatible competitor. +Imagine all your data is organized into a personal namespace, and +you can compute your own functions on that namespace. + +In this world, no app developer has any way to hold your data +hostage. Forget how this works technically (it doesn't). How does +it change the apps? + +Actually, the rules of this thought-experiment world are so +different that *few of the same apps exist*. Other people's apps +are fundamentally different from your own apps. They're not +"yours" because you developed them -- they're your apps because +you can fire the developer without any pain point. You are not a +hostage, so the power dynamic changes. Which changes the app. 
+ +For example: with other people's apps, when you want to shop on +the Internets, you point your browser at amazon.com or use the +Google bar as a full-text store. With your own apps, you're more +likely to point your browser at your own shopping assistant. This +program, which *works entirely for you and is not slipping anyone +else a cut*, uses APIs to sync inventory data and send purchase +orders. + +Could you write this web app today? Sure. It would be a store. +The difference between apps you control and apps you don't is the +difference between a shopping assistant and a store. It would be +absurd if a shopping assistant paid its developer a percentage of +all transactions. It would be absurd if a store didn't. The +general task is the same, but every detail is different. + +Ultimately, the planet is a different user experience because you +trust the computer more. A program running on someone else's +computer can promise it's working only for you. This promise is +generally false and you can't enforce it. When a program on your +computer makes the same promise, it's generally true and you can +enforce it. Control changes the solution because control produces +trust and trust changes the problem. + +Could we actually add this level of user sovereignty to the "1:n" +cloud? It'd take a lot of engineers, a lot of lawyers, and a +whole lot of standards conferences. Or we could just build +ourselves some planets. + +Obstacles +========= + +If this is such a great product, why can't you already buy it? + +In 2015, general-purpose cloud servers are easily available. But +they are industrial tools, not personal computers. Most users +(consumer and enterprise) use personal appliances. They choose +"1:n" over "n:1". Why does "1:n" beat "n:1" in the real world? + +Perhaps "1:n" is just better. Your herd of developer-hosted +appliances is just a better product than a self-hosted planet. Or +at least, this is what the market seems to be telling us. + +Actually, the market is telling us something more specific. It's +saying: the appliance herd is a better product than a self-hosted +*Unix server on the Internet*. + +The market hasn't invalidated the abstract idea of the planet. +It's invalidated the concrete product of the planet *we can +actually build on the system software we actually have*. + +In 1978, a computer was a VAX. A VAX cost \$50K and was the size +of a fridge. By 1988, it would cost \$5K and fit on your desk. +But if a computer is a VAX, however small or cheap, there is no +such thing as a PC. And if a planet is an AWS box, there is no +such thing as a planet. + +The system software stack that 2015 inherited -- two '70s +designs, Unix and the Internet -- remains a viable platform for +"1:n" industrial servers. Maybe it's not a viable platform for +"n:1" personal servers? Just as VAX/VMS was not a viable +operating system for the PC? + +But what would a viable platform for a personal server look like? +What exactly is this stack? If it's not a Unix server on the +Internet, it's an X server on network Y. What are X and Y? Do +they exist? + +Clearly not. So all we have to replace is Unix and the Internet. +In other words, all we have to replace is everything. Is this an +obstacle, or an opportunity? + +A clean-slate redesign seems like the obvious path to the levels +of simplicity we'll need in a viable planet. Moreover, it's +actually easier to redesign Unix and the Internet than Unix *or* +the Internet. 
Computing and communication are not separate +concerns; if we design the network and OS as one system, we avoid +all kinds of duplications and impedance mismatches. + +And if not now, when? Will there ever be a clean-slate redesign +of '70s system software? A significant problem demands an +ambitious solution. Intuitively, clean-slate computing and the +personal cloud feel like a fit. Let's see if we can make the +details work. We may never get another chance to do it right. + +Principles of platform reform +----------------------------- + +To review: we don't have planets because our antique system +software stack, Unix and the Internet, is a lousy platform on +which to build a planet. If we care about the product, we need to +start with a new platform. + +How can we replace Unix and the Internet? We can't. But we can +tile over them, like cheap bathroom contractors. We can use the +Unix/Internet, or "classical layer," as an implementation +substrate for our new layer. A platform on top of a platform. +Under your Brazilian slate, there's pink Color Tile from 1976. + +"Tiling over" is the normal way we replace system software. The +Internet originally ran over phone circuits; under your +transatlantic TCP packets, there's an ATM switch from 1996. And +of course, under your browser there's an OS from 1976. + +After a good job of tiling over, the obsolete layer can sometimes +be removed. In some cases it's useless, but would cost too much +to rip out. In some cases it's still used -- a Mac doesn't (yet) +boot to Safari. But arguably, that's because the browser platform +is anything but a perfect tiling job. + +One property the browser got right was total opacity. The old +platform implements the new platform, but can't be visible +through it. If web apps could make Unix system calls or use Unix +libraries, there would be no such thing as web apps. + +(In fact, one easy way to think of a planet is as "the browser +for the server side." The browser is one universal client that +hosts "n" independent client applications; the planet is one +universal server that hosts "n" independent server applications.) + +And the bathroom remains a bathroom. The new platform does the +same *general* job as the old one. So to justify the reform, it +has to be *much* better at its new, specific job. For instance, +the browser is easily two orders of magnitude better than Unix at +installing untrusted transient applications (ie, web pages). + +Abstract targets +---------------- + +Again, we have two problems: Unix and the Internet. What about +each do we need to fix? What exactly is wrong with the classical +layer? What qualities should our replacement have? + +Since we're doing a clean-slate design, it's a mistake to focus +too much on fixing the flaws of the old platform. The correct +question is: what is the right way to build a system software +stack? Besides cosmetic details like character sets, this +exercise should yield the same results on Mars as Earth. + +But we come from Earth and will probably find ourselves making +normal Earth mistakes. So we can at least express our abstract +design goals in normal Earth terms. + +### A simpler OS + +One common reaction to the personal-server proposition: "my +mother is not a Linux system administrator." Neither is mine. She +does own an iPhone, however. Which is also a general-purpose +computer. A usability target: a planet should be as easy to +manage as an iPhone. 
+ +(To go into way too much detail: on a planet with the usability +of iOS, the user as administrator has four system configuration +tasks for the casual or newbie user: (a) deciding which +applications to run; (b) checking resource usage; (c) configuring +API authentication to your existing Web apps (your planet wants +to access, rip or even sync your silo data); and (d) maintaining +a reputation database (your friends, enemies, etc). (a) and (b) +exist even on iOS; (c) and (d) are inherent in any planet; (d) is +common on the Web, and (c) not that rare.) + +Could a planet just run iOS? The complexity of managing a +computer approximates the complexity of its state. iOS is a +client OS. Its apps are not much more than webpages. There is +much more state on a server; it is much more valuable, and much +more intricate, and much more durable. + +(Moreover, an iOS app is a black box. It's running on your +physical hardware, but you don't have much more control over it +than if it was remote. iOS apps are not designed to share data +with each other or with user-level computing tools; there is no +visible filesystem, shell, etc. This is not quite the ownership +experience -- it's easy to get locked in to a black box.) + +And while an Apple product is a good benchmark for any usability +goal, it's the exception that proves the rule for architecture. +iOS is Unix, after all -- Unix with a billion-dollar makeover. +Unix is not a turd, and Cupertino could probably polish it even +if it was. + +Simplicity is the only viable path to usability for a new +platform. It's not sufficient, but it is necessary. The computer +feels simple to the user not because it's presenting an illusion +of simplicity, but because it really is simple. + +While there is no precise way to measure simplicity, a good proxy +is lines of code -- or, to be slightly more sophisticated, +compressed code size. Technical simplicity is not actually +usability, just a force multiplier in the fight for usability. +But usability proper can only be assessed once the UI is +complete, and the UI is the top layer by definition. So we assume +this equation and target simplicity. + +### A sane network + +When we look at the reasons we can't have a nice planet, Unix is +a small part of the problem. The main problem is the Internet. + +There's a reason what we call "social networks" on the Internet +are actually centralized systems -- social *servers*. For a "1:n" +application, social integration - communication between two users +of the same application - is trivial. Two users are two rows in +the same database. + +When we shift to a "n:1" model, this same integration becomes a +distributed systems problem. If we're building a tictactoe app in +a "1:n" design, our game is a single data structure in which +moves are side effects. If we're building the same app on a +network of "n:1" model, our game is a distributed system in which +moves are network messages. + +Building and managing distributed Internet systems is not easy. +It's nontrivial to build and manage a centralized API. Deploying +a new global peer-to-peer protocol is a serious endeavor. + +But this is what we have to do for our tictactoe app. We have to +build, for instance, some kind of identity model - because who +are you playing against, an IP address? To play tictactoe, you +find yourself building your own distributed social network. + +While there are certainly asocial apps that a planet can run, +it's not clear that an asocial planet is a viable product. 
If the
+cost of building a distributed social service isn't close to the
+cost of building its centralized equivalent, the design isn't
+really successful.
+
+Fortunately, while there are problems in "n:1" services that no
+platform can solve for you (mostly around consistency), there is
+a set of problems with "1:n" services that no platform can solve
+for you (mostly around scaling). Scaling problems in "n:1" social
+services only arise when you're Justin Bieber and have a million
+friends, ie, a rare case even in a mature network. Mr. Bieber can
+probably afford a very nice computer.
+
+### Concrete requirements
+
+Here are some major features we think any adequate planet needs.
+They're obviously all features of Urbit.
+
+#### Repeatable computing
+
+Any non-portable planet is locked in to its host. That's bad. You
+can have all the legal guarantees you like of unconditional
+migration. Freedom means nothing if there's nowhere to run to.
+Some of today's silos are happy to give you a tarball of your own
+data, but what would you do with it?
+
+The strongest way to ensure portability is a deterministic,
+frozen, non-extensible execution model. Every host runs exactly
+the same computation on the same image and input, for all time.
+When you move that image, the only thing it notices is that its
+IP address has changed.
+
+We could imagine a planet with an unfrozen spec, which had some
+kind of backward-compatible upgrade process. But with a frozen
+spec, there is no state outside the planet itself, no input that
+is not input to the planet itself, and no way of building a
+planet on one host that another host can't compute correctly.
+
+Of course every computer is deterministic at the CPU level, but
+CPU-level determinism can't in practice record and replay its
+computation history. A computer which is deterministic at the
+semantic level can. Call it "repeatable computing."
+
+#### Orthogonal persistence
+
+It's unclear why we'd expose the transience semantics of the
+hardware memory hierarchy to either the programmer or the user.
+When we do so, we develop two different models for managing data:
+"programming languages" and "databases." Mapping between these
+models, eg "ORM," is the quintessence of boilerplate.
+
+A simple pattern for orthogonal persistence without a separate
+database is "prevalence": a checkpoint plus a log of events since
+the checkpoint. Every event is an ACID transaction. In fact, most
+databases use this pattern internally, but their state transition
+function is not a general-purpose interpreter.
+
+#### Identity
+
+In the classical layer, hosts, usernames, IP addresses, domain
+names and public keys are all separate concepts. A planet has one
+routable, memorable, cryptographically independent identity which
+serves all these purposes.
+
+The "Zooko's triangle" impossibility result tells us we can't
+build a secure, meaningful, decentralized identity system. Rather
+than surrendering one of these three goals, the planet can
+retreat a little bit on two of them. It can be memorable, but not
+meaningful; it can start as a centralized system, but
+decentralize itself over time. 100-80-70 is often preferable to
+100-100-0.
+
+#### A simple typed functional language
+
+Given the level of integration we're expecting in this design,
+it's silly to think we could get away without a new language.
+There's no room in the case for glue. Every part has to fit.
+
+The main obstacle to functional language adoption is that
+functional programming is math, and most human beings are really
+bad at math. Even most programmers are bad at math. Their
+intuition of computation is mechanical, not mathematical.
+
+A pure, higher-order, typed, strict language with mechanical
+intuition and no mathematical roots seems best positioned to
+defeat this obstacle. Its inference algorithm should be almost
+but not quite as strong as Hindley-Milner unification, perhaps
+inferring "forward but not backward."
+
+We'd also like two other features from our types. One, a type
+should define a subset of values against a generic data model,
+the way a DTD defines a set of XML values. Two, defining a type
+should mean defining an executable function, whose range is the
+type, that verifies or normalizes a generic value. Why these
+features? See the next section...
+
+#### High-level communication
+
+A planet could certainly use a network type descriptor that was
+like a MIME type, if a MIME type was an executable specification
+and could validate incoming content automatically. After ORM,
+manual data validation must be the second leading cause of
+boilerplate. If we have a language in which a type is also a
+validator, the automatic validation problem seems solvable. We
+can get to something very like a typed RPC.
+
+Exactly-once message delivery semantics are impossible unless the
+endpoints have orthogonal persistence. Then they're easy: use a
+single persistent session with monotonically increasing sequence
+numbers. It's nice not worrying about idempotence.
+
+With an integrated OS and protocol, it's possible to fully
+implement the end-to-end principle, and unify the application
+result code with the packet acknowledgment -- rather than having
+three different layers of error code. Not only is every packet a
+transaction at the system level; every message is a transaction
+at the application level. Applications can even withhold an
+acknowledgment to signal internal congestion, sending the other
+end into exponential backoff.
+
+It's also nice to support simple higher-level protocol patterns
+like publish/subscribe. We don't want every application to have
+to implement its own subscription queue backpressure. Also, if we
+can automatically validate content types, perhaps we can also
+diff and patch them, making remote syncing practical.
+
+The web will be around for a while. It would be great to have a
+web server that let a browser page with an SSO login authenticate
+itself as another planet, translating messages into JSON. This
+way, any distributed application is also a web application.
+
+Finally, a planet is also a great web *client*. There is lots of
+interesting data behind HTTP APIs. A core mission of a planet is
+collecting and maintaining the secrets the user needs to manage
+any off-planet data. (Of course, eventually this data should come
+home, but it may take a while.) The planet's operating system
+should serve as client-side middleware that mediates the API
+authentication process, letting the programmer program against
+the private web as if it was public, using site-specific auth
+drivers with user-configured secrets.
+
+#### Global namespace
+
+The URL taught us that any global identity scheme is the root of
+a global namespace. But the URL also made a big mistake:
+mutability.
+
+A global namespace is cool.
An immutable (referentially +transparent) global namespace, which can use any data in the +universe as if it was a constant, is really cool. It's even +cooler if, in case your data hasn't been created yet, your +calculation can block waiting for it. Coolest of all is when the +data you get back is typed, and you can use it in your typed +functional language just like dereferencing a pointer. + +Of course, an immutable namespace should be a distributed version +control system. If we want typed data, it needs to be a typed +DVCS, clearly again with type-specific diff and patch. Also, our +DVCS (which already needs a subscription mechanism to support +blocking) should be very good at reactive syncing and mirroring. + +#### Semantic drivers + +One unattractive feature of a pure interpreter is that it exacts +an inescapable performance tax -- since an interpreter is always +slower than native code. This violates the prime directive of OS +architecture: the programmer must never pay for any service that +doesn't get used. Impure interpreters partly solve this problem +with a foreign-function interface, which lets programmers move +inner loops into native code and also make system calls. An FFI +is obviously unacceptable in a deterministic computer. + +A pure alternative to the FFI is a semantic registry in which +functions, system or application, can declare their semantics in +a global namespace. A smart interpreter can recognize these +hints, match them to a checksum of known good code, and run a +native driver that executes the function efficiently. This +separates policy (pure algorithm as executable specification) +from mechanism (native code or even hardware). + +#### Repository-driven updates + +A planet owner is far too busy to manually update applications. +They have to update themselves. + +Clearly the right way to execute an application is to run it +directly from the version-control system, and reload it when a +new version triggers any build dependencies. But in a planet, the +user is not normally the developer. To install an app is to +mirror the developer's repository. The developer's commit starts +the global update process by updating mirror subscriptions. + +Of course, data structures may change, requiring the developer to +include an adapter function that converts the old state. (Given +orthogonal persistence, rebooting the app is not an option.) + +Added fun is provided by the fact that apps use the content types +defined in their own repository, and these types may change. +Ideally these changes are backward compatible, but an updated +planet can still find itself sending an un-updated planet +messages that don't validate. This race condition should block +until the update has fully propagated, but not throw an error up +to the user level -- because no user has done anything wrong. + +### Why not a planet built on JS or JVM? + +Many programmers might accept our reasoning at the OS level, but +get stuck on Urbit's decision not to reuse existing languages or +interpreters. Why not JS, JVM, Scheme, Haskell...? The planet is +isolated from the old layer, but can't it reuse mature designs? + +One easy answer is that, if we're going to be replacing Unix and +the Internet, or at least tiling over them, rewriting a bit of +code is a small price to pay for doing it right. Even learning a +new programming language is a small price to pay. And an +essential aspect of "doing it right" is a system of components +that fit together perfectly; we need all the simplicity wins we +can get. 
+
+But these are big, hand-waving arguments. It takes more than this
+kind of rhetoric to justify reinventing the wheel. Let's look at
+a few details, trying not to get ahead of ourselves.
+
+In the list above, only JS and the JVM were ever designed to be
+isolated. The others are tools for making POSIX system calls.
+Isolation in JS and the JVM is a client thing. It is quite far
+from clear what "node.js with browser-style isolation" would even
+mean. And who still uses Java applets?
+
+Let's take a closer look at the JS/JVM options - not as the only
+interpreters in the world, just as good examples. Here are some
+problems we'd need them to solve, but they don't solve - not, at
+least, out of the box.
+
+First: repeatability. JS and the JVM are not frozen, but warm;
+they release new versions with backward compatibility. This means
+they have "current version" state outside the planet proper. Not
+lethal but not good, either.
+
+When pure, JS and the JVM are at least nominally deterministic,
+but they are also used mainly on transient data. It's not clear
+that the actual implementations and specifications are built
+for the lifecycle of a planet - which must never miscompute a
+single bit. (ECC servers are definitely recommended.)
+
+Second: orthogonal persistence. Historically, successful OP
+systems are very rare. Designing the language and OS as one unit
+seems intuitively required.
+
+One design decision that helps enormously with OP is an acyclic
+data model. Acyclic data structures are enormously easier to
+serialize, to specify and validate, and of course to
+garbage-collect. Acyclic databases are far more common than
+cyclic ("network" or "object") databases. Cyclic languages are
+more common than acyclic languages -- but pure functional
+languages are acyclic, so we know acyclic programming can work.
+
+(It's worth mentioning existing image-oriented execution
+environments - like Smalltalk and its descendants, or even the
+Lisp machine family. These architectures (surely Urbit's closest
+relatives) could in theory be adapted for use as orthogonally
+persistent databases, but in practice are not designed for it.
+For one thing, they're all cyclic. More broadly, the assumption
+that the image is a GUI client in RAM is deeply ingrained.)
+
+Third: since a planet is a server and a server is a real OS, its
+interpreter should be able to efficiently virtualize itself.
+There are two kinds of interpreter: the kind that can run an
+instance of itself as a VM, and the kind that can't.
+
+JS can almost virtualize itself with `eval`, but `eval` is a toy.
+(One of those fun but dangerous toys -- like lawn darts.) And
+while it's not at all the same thing, the JVM can load applets --
+or at least, in 1997 it could...
+
+(To use some Urbit concepts we haven't met yet: with semantic
+drivers (which don't exist in JS or the JVM, although asm.js is a
+sort of substitute), we don't even abandon all the world's JS or
+JVM code by using Urbit. Rather, we can implement the JS or JVM
+specifications in Hoon, then jet-propel them with practical Unix
+JS or JVM engines, letting us run JS or Java libraries.)
+
+Could we address any or all of these problems in the context of
+JS, the JVM or any other existing interpreter? We could. This
+does not seem likely to produce results either better or sooner
+than building the right thing from scratch.
+ +Definition +========== + +An operating function (OF) is a logical computer whose state is a +fixed function of its input history: + + V(I) => T + +where `T` is the state, `V` is the fixed function, `I` is the +list of input events from first to last. + +Intuitively, what the computer knows is a function of what it's +heard. If the `V` function is identical on every computer for all +time forever, all computers which hear the same events will learn +the same state from them. + +Is this function a protocol, an OS, or a database? All three. +Like an OS, `V` specifies the semantics of a computer. Like a +protocol, `V` specifies the semantics of its input stream. And +like a database, `V` interprets a store of persistent state. + +Advantages +---------- + +This is a very abstract description, which doesn't make it easy +to see what an OF is useful for. + +Two concrete advantages of any practical OF (not just Urbit): + +### Orthogonal persistencе + +An OF is inherently a single-level store. `V(I) => T` is an +equation for the lifecycle of a computer. It doesn't make any +distinction between transient RAM and persistent disk. + +The word "database" has always conflated two concepts: data that +isn't transient; structures specialized for search. A substantial +percentage of global programmer-hours are spent on translating +data between memory and disk formats. + +Every practical modern database computes something like +`V(I) => T`. `I` is the transaction log. An OF is an ACID +database whose history function (log to state) is a +general-purpose computer. A transaction is an event, an event is +a transaction. + +(Obviously a practical OF, though still defined as `V(I) => T`, +does not recompute its entire event history on every event. +Rather, it computes some incremental iterator `U` that can +compute `U(E T0) => T1`, where `E` is each event in `I`. + +In plain English, the next state `T1` is a function `U` of the +latest event `E` and the current state `T0`. We can assume that +some kind of incrementality will fall out of any reasonable `V`.) + +In any reasonable OF, you can write a persistent application by +leaving your data in the structures you use it from. Of course, +if you want to use specialized search structures, no one is +stopping you. + +### Repeatable computing + +Every computer is deterministic at the CPU level, in the sense +that the CPU has a manual which is exactly right. But it is not +actually practical to check the CPU's work. + +"Repeatable computing" is high-level determinism. It's actually +practical to audit the validity of a repeatable computer by +re-executing its computation history. For an OF, the history is +the event log. + +Not every urbit is a bank; not every urbit has to store its full +log. Still, the precision of a repeatable computer also affects +the user experience. (It's appalling, for instance, how +comfortable the modern user is with browser hangs and crashes.) + +Requirements +------------ + +`V` can't be updated for the lifecycle of the computer. Or, since +`V` is also a protocol, for the lifetime of the network. + +So `V` needs to be perfect, which means it needs to be small. +It's also easier to eradicate ambiguity in a small definition. + +But we're designing a general-purpose computer which is +programmed by humans. Thus `V` is a programming language in some +sense. Few practical languages are small, simple or perfect. + +`V` is a practical interpreter and should be reasonably fast. But +`V` is also a (nonpreemptive) operating system. 
One universal +feature of an OS is the power to virtualize user-level code. So +we need a small, simple, perfect interpreter which can +efficiently virtualize itself. + +`V` is also a system of axioms in the mathematical sense. Axioms +are always stated informally. This statement succeeds iff it +accurately communicates the same axioms to every competent +reader. Compressing the document produces a rough metric of +information content: the complexity of `V`. + +(It's possible to cheat on this test. For instance, we could +design a simple `V` that only executes another interpreter, `W`. +`W`, the only program written in V's language, is simply encoded +in the first event. Which could be quite a large event. + +This design achieves the precision of `V`, but not its stability. +For stability, the true extent of `V` is the semantic kernel that +the computer can't replace during its lifetime from events in the +input history. Thus `V` properly includes `W`.) + +There are no existing interpreters that ace all these tests, so +Urbit uses its own. Our `V` is defined in 350 bytes gzipped. + +Nouns +----- + +A value in Urbit is a *noun*. A noun is an *atom* or a *cell*. An +atom is an unsigned integer of any size. A cell is an ordered +pair of nouns. + +In the system equation `V(I) => T`, `T` is a noun, `I` is a list +of nouns - where a list is either `0` or a cell `[item list]`. +The `V` function is defined below. + +Nock +---- + +Nock or `N` is a combinator interpreter on nouns. It's specified +by these pseudocode reduction rules: + + Nock(a) *a + [a b c] [a [b c]] + + ?[a b] 0 + ?a 1 + +[a b] +[a b] + +a 1 + a + =[a a] 0 + =[a b] 1 + =a =a + + /[1 a] a + /[2 a b] a + /[3 a b] b + /[(a + a) b] /[2 /[a b]] + /[(a + a + 1) b] /[3 /[a b]] + /a /a + + *[a [b c] d] [*[a b c] *[a d]] + + *[a 0 b] /[b a] + *[a 1 b] b + *[a 2 b c] *[*[a b] *[a c]] + *[a 3 b] ?*[a b] + *[a 4 b] +*[a b] + *[a 5 b] =*[a b] + + *[a 6 b c d] *[a 2 [0 1] 2 [1 c d] [1 0] 2 [1 2 3] [1 0] 4 4 b] + *[a 7 b c] *[a 2 b 1 c] + *[a 8 b c] *[a 7 [[7 [0 1] b] 0 1] c] + *[a 9 b c] *[a 7 c 2 [0 1] 0 b] + *[a 10 [b c] d] *[a 8 c 7 [0 3] d] + *[a 10 b c] *[a c] + + *a *a + +Note that operators 6-10 are macros and not formally essential. +Note also that Nock reduces invalid reductions to self, thus +specifying nontermination (bottom). + +A valid Nock reduction takes a cell `[subject formula]`. The +formula defines a function that operates on the subject. + +A valid formula is always a cell. If the head of the formula is a +cell, Nock reduces head and tail separately and produces the cell +of their products ("autocons"). If the head is an atom, it's an +operator from `0` to `10`. + +In operators `3`, `4`, and `5`, the formula's tail is another +formula, whose product is the input to an axiomatic function. `3` +tests atom/cell, `4` increments, `5` tests equality. + +In operator `0`, the tail is a "leg" (subtree) address in the +subject. Leg `1` is the root, `2n` the left child of `n`, `2n+1` +the right child. + +In operator `1`, the tail is produced as a constant. In operator +`2`, the tail is a formula, producing a `[subject formula]` cell +which Nock reduces again (rendering the system Turing complete). + +In the macro department, `6` is if-then-else; `7` is function +composition; `8` is a stack push; `9` is a function call; `10` is +a hint. + +Surprisingly, Nock is quite a practical interpreter, although it +poses unusual implementation challenges. 
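+
+To make the reduction rules concrete, here is a toy reducer for
+the spec above -- a non-normative sketch in Python, with names
+invented here for illustration, not anything Urbit itself ships
+or relies on:
+
+    # Nouns: an atom is a Python int; a cell is a two-tuple of nouns.
+
+    def cell(noun):                  # ?  0 (yes) if cell, 1 (no) if atom
+        return 0 if isinstance(noun, tuple) else 1
+
+    def slot(axis, noun):            # /  tree addressing; axis 1 is the root
+        if axis == 1:
+            return noun
+        limb = slot(axis // 2, noun)
+        return limb[axis % 2]        # even axis -> head, odd axis -> tail
+
+    def nock(subject, formula):      # *  reduce *[subject formula]
+        head, tail = formula
+        if isinstance(head, tuple):  # autocons: [a [b c] d] -> [*[a b c] *[a d]]
+            return (nock(subject, head), nock(subject, tail))
+        if head == 0:                # 0: fetch a leg of the subject
+            return slot(tail, subject)
+        if head == 1:                # 1: constant
+            return tail
+        if head == 2:                # 2: evaluate a computed [subject formula]
+            b, c = tail
+            return nock(nock(subject, b), nock(subject, c))
+        if head == 3:                # 3: cell test
+            return cell(nock(subject, tail))
+        if head == 4:                # 4: increment
+            return nock(subject, tail) + 1
+        if head == 5:                # 5: structural equality of a computed cell
+            product = nock(subject, tail)
+            return 0 if product[0] == product[1] else 1
+        if head == 6:                # 6: if-then-else (the macro's net effect)
+            b, (c, d) = tail
+            test = nock(subject, b)
+            assert test in (0, 1)
+            return nock(subject, c if test == 0 else d)
+        if head == 7:                # 7: composition
+            b, c = tail
+            return nock(nock(subject, b), c)
+        if head == 8:                # 8: push the product of b onto the subject
+            b, c = tail
+            return nock((nock(subject, b), subject), c)
+        if head == 9:                # 9: pull arm b from the core built by c
+            b, c = tail
+            core = nock(subject, c)
+            return nock(core, slot(b, core))
+        if head == 10:               # 10: hint; run a dynamic clue, then discard it
+            b, c = tail
+            if isinstance(b, tuple):
+                nock(subject, b[1])
+            return nock(subject, c)
+        raise ValueError("invalid formula")
+
+    # *[42 [4 0 1]] is 43; *[[132 19] [0 3]] is 19
+    assert nock(42, (4, (0, 1))) == 43
+    assert nock((132, 19), (0, 3)) == 19
+
+The unusual implementation challenges come from what the spec
+leaves out.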
For instance, the only +arithmetic operation is increment (see the implementation issues +section for how this is practical). Another atypical feature is +that Nock can neither test pointer equality, nor create cycles. + +The fixed function +------------------ + +So is `V` just Nock? Almost. Where `N` is Nock, `V` is: + + V(I) == N(I [2 [0 3] [0 2]]) + +If Nock was not `[subject formula]` but `[formula subject]`, not +`N(S F)` but `N(F S)`, V would be Nock. + +In either case, the head `I0` of `I` is the boot formula; the +tail is the boot subject. Intuitively: to interpret the event +history, treat the first event as a program; run that program on +the rest of history. + +More abstractly: the event sequence `I` begins with a *boot +sequence* of non-uniform events, which do strange things like +executing each other. Once this sequence completes, we end up in +a *main sequence* of uniform events (starting at `I5`), which +actually look and act like actual input. + +`I0`: The lifecycle formula +--------------------------- + +`I0` is special because we run it in theory but not in practice. +Urbit is both a logical definition and a practical interpreter. +Sometimes there's tension between these goals. But even in +practice, we check that `I0` is present and correct. + +`I0` is a nontrivial Nock formula. Let's skip ahead and write it +in our high-level language, Hoon: + + => [load rest]=. + =+ [main step]=.*(rest load) + |- ?~ main step + $(main +.main, step (step -.main)) + +Or in pseudocode: + + from pair $load and $rest + let pair $main and $step be: + the result of Nock $load run on $rest + loop + if $main is empty + return $step + else + continue with + $main set to tail of $main, + $step set to: + call function $step on the head of $main + +In actual Nock (functional machine code, essentially): + + [8 [2 [0 3] [0 2]] 8 [ 1 6 [5 [1 0] 0 12] [0 13] 9 2 + [0 2] [[0 25] 8 [0 13] 9 2 [0 4] [0 56] 0 11] 0 7] 9 2 0 1] + +`I0` is given the sequence of events from `I1` on. It takes `I1`, +the boot loader, and runs it against the rest of the sequence. +`I1` consumes `I2`, `I3`, and `I4`, and then it produces the true +initial state function `step` and the rest of the events from +`I5` on. + +`I0` then continues on to the main sequence, iteratively +computing `V(I)` by calling the `step` function over and over on +each event. Each call to `step` produces the next state, +incorporating the changes resulting from the current event. + +`I1`: the language formula +-------------------------- + +The language formula, `I1`, is another nontrivial Nock formula. +Its job is to compile `I3`, the Hoon compiler as source, with +`I2`, the Hoon compiler as a Nock formula. + +If `I3`, compiled with `I2`, equals `I2`, `I1` then uses `I2` to +load `I4`, the Arvo source. The main event sequence begins with +`I5`. The only events which are Nock formulas are `I0`, `I1`, and +`I2`; the only events which are Hoon source are `I3` and `I4`. + +`I1`, in pseudo-Hoon: + + => [fab=- src=+< arv=+>- seq=+>+] + =+ ken=(fab [[%atom %ud] 164] src) + ?> =(+>.fab +.ken) + [seq (fab ken arv)] + +In pseudocode: + + with tuple as [$fab, $src, $arv, $seq], // these are I2 I3 I4 and I5... 
+ let $ken be: + call $fab on type-and-value "uint 164" and Hoon $src + assert environment from $fab is equal to the value from $ken + return pair of the remaining $seq and: + call $fab on environment $ken and Hoon $arv + +In actual Nock: + + [7 [0 2] 8 [8 [0 2] 9 2 [0 4] [7 [0 3] [1 [1.836.020.833 + 25.717] 164] 0 6] 0 11] 6 [5 [0 27] 0 5] [[0 31] 8 [0 6] + 9 2 [0 4] [7 [0 3] [0 2] 0 30] 0 11] 0 0] + +(`25.717`? Urbit uses the German dot notation for large atoms.) + +We compile the compiler source, check that it produces the same +compiler formula, and then compile the OS with it. + +Besides `I0` and `I1`, there's no Nock code in the boot sequence +that doesn't ship with Hoon source. + +`I2` and `I3`: the Hoon compiler +-------------------------------- + +Again, `I2` is the Hoon compiler (including basic libraries) as a +Nock formula. `I3` is the same compiler as source. + +Hoon is a strict, typed, higher-order pure functional language +which compiles itself to Nock. It avoids category theory and +aspires to a relatively mechanical, concrete style. + +The Hoon compiler works with three kinds of noun: `nock`, a Nock +formula; `twig`, a Hoon AST; and `span`, a Hoon type, which +defines semantics and constraints on some set of nouns. + +There are two major parts of the compiler: the front end, which +parses a source file (as a text atom) to a twig; and the back +end, `ut`, which accepts a subject span and a twig, and compiles +them down to a product span and a Nock formula. + +### Back end and type system + +The Hoon back end, (`ut`), about 1700 lines of Hoon, performs +type inference and (Nock) code generation. The main method of +`ut` is `mint`, with signature `$+([span twig] [span Nock])` +(i.e., in pseudocode, `mint(span, twig) -> [span Nock]`). + +Just as a Nock formula executes against a subject noun, a Hoon +expression is always compiled against a subject span. From this +`[span twig]`, `mint` produces a cell `[span Nock]` . The Nock +formula generates a useful product from any noun in the subject +span. The span describes the set of product nouns. + +Type inference in Hoon uses only forward tracing, not unification +(tracing backward) as in Hindley-Milner (Haskell, ML). Hoon needs +more user annotation than a unification language, but it's easier +for the programmer to follow what the algorithm is doing - just +because Hoon's algorithm isn't as smart. + +But the Hoon type system can solve most of the same problems as +Haskell's, notably including typeclasses / genericity. For +instance, it can infer the type of an AST by compiling a grammar +in the form of an LL combinator parser (like Hoon's own grammar). + +The Hoon type system, slightly simplified for this overview: + + ++ span $| $? %noun + %void + == + $% [%atom p=cord] + [%cell p=span q=span] + [%core p=span q=(map term twig)] + [%cube p=noun q=span] + [%face p=term q=span] + [%fork p=span q=span] + [%hold p=span q=twig] + == + +In pseudocode: + + define $span as one of: + 'noun' + 'void' + 'atom' with text aura + 'cell' of $span and $span + 'core' of environment $span and map of name to code AST + 'cube' of constant value and $span + 'face' of name wrapping $span + 'fork' of $span and alternate $span + 'hold' of subject $span and continuation AST + +(The syntax `%string` means a *cord*, or atom with ASCII bytes in +LSB first order. Eg, `%foo` is also 0x6f.6f66 or 7.303.014.) + +The simplest spans are `%noun`, the set of all nouns, and +`%void`, the empty set. 
+ +Otherwise, a span is one of `%atom`, `%cell`, `%core`, `%cube`, +`%face`, `%fork` or `%hold`, each of which is parameterized with +additional data. + +An `%atom` is any atom, plus an *aura* cord that describes both +what logical units the atom represents, and how to print it. For +instance, `%ud` is an unsigned decimal, `%ta` is ASCII text, +`%da` is a 128-bit Urbit date. `%n` is nil (`~`). + +A `%cell` is a recursively typed cell. `%cube` is a constant. +`%face` wraps another span in a name for symbolic access. `%fork` +is the union of two spans. + +A `%core` is a combination of code and data - Hoon's version of +an object. It's a cell `[(map term formula) payload]`, i.e. a set +of named formulas (essentially "computed attributes") and a +"payload", which includes the context the core is defined in. +Each arm uses the whole core as its subject. + +One common core pattern is the *gate*, Hoon's version of a +lambda. A gate is a core with a single formula, and whose payload +is a cell `[arguments context]`. To call the function, replace +the formal arguments with your actual argument, then apply. The +context contains any data or code the function may need. + +(A core is not exactly an object, nor a map of names to formulas +a vtable. The formulas are computed attributes. Only a formula +that produces a gate is the equivalent of a method.) + +### Syntax, text + +Hoon's front end (`vast`) is, at 1100 lines, quite a gnarly +grammar. It's probably the most inelegant section of Urbit. + +Hoon is a keyword-free, ideographic language. All alphabetic +strings are user text; all punctuation is combinator syntax. The +*content* of your code is always alphabetic; the *structure* is +always punctuation. And there are only 80 columns per line. + +A programming language is a UI for programmers. UIs should be +actually usable, not apparently usable. Some UI tasks are harder +than they appear; others are easier. Forming associative memories +is easier than it looks. + +Languages do need to be read out loud, and the conventional names +for punctuation are clumsy. So Hoon replaces them: + + ace [1 space] gal < pel ( + bar | gap [>1 space, nl] per ) + bas \ gar > sel [ + buc $ hax # sem ; + cab _ hep - ser ] + cen % kel { soq ' + col : ker } tar * + com , ket ^ tec ` + doq " lus + tis = + dot . pam & wut ? + fas / pat @ zap ! + +For example, `%=` sounds like "centis" rather than "percent +equals." Since even a silent reader will subvocalize, the length +and complexity of the sound is a tax on reading the code. + +A few digraphs also have irregular sounds: + + == stet + -- shed + ++ slus + -> dart + -< dusk + +> lark + +< lush + +Hoon defines over 100 digraph ideograms (like `|=`). Ideograms +(or *runes*) have consistent internal structure; for instance, +every rune with the `|` prefix produces a core. There are no +user-defined runes. These facts considerably mitigate the +memorization task. + +Finally, although it's not syntax, style merits a few words. +While names in Hoon code can have any internal convention the +programmer wants (as long as it's lowercase ASCII plus hyphen), +one convention we use a lot: face names which are random +three-letter syllables, arm names which are four-letter random +Scrabble words. + +Again, forming associative memories is easier than it looks. +Random names quickly associate to their meanings, even without +any mnemonic. A good example is the use of Greek letters in math. +As with math variables, consistency helps too - use the same name +for the same concept everywhere. 
+ +This "lapidary" convention is appropriate for code that's +nontrivial, but still clean and relatively short. But code can't +be clean unless it's solving a clean problem. For dirty problems, +long informative names and/or kebab-case are better. For trivial +problems (like defining `add`), we use an even more austere +"hyperlapidary" style with one-letter names. + +### Twig structure + +A Hoon expression compiles to a `twig`, or AST node. A twig is +always a cell. + +Like Nock formulas, Hoon twigs "autocons." A twig whose head is a +cell is a pair of twigs, which compiles to a pair of formulas, +which is a formula producing a pair. ("autocons" is an homage to +Lisp, whose `(cons a (cons b c))` becomes Urbit's `[a b c]`.) + +Otherwise, a twig is a tagged union. Its tag is always a rune, +stored by its phonetic name as a cord. Because it's normal for +31-bit numbers to be direct (thus, somewhat more efficient), we +drop the vowels. So `=+` is not `%tislus`, but `%tsls`. + +There are too many kinds of twig to enumerate here. Most are +implemented as macros; only about 25 are directly translated to +Nock. There is nothing sophisticated about twigs - given the +syntax and type system, probably anyone would design the same +twig system. + +### Syntax geometry + +Just visually, one nice feature of imperative languages is the +distinction between statements, which want to grow vertically +down the page, and expressions, which prefer to grow +horizontally. + +Most functional languages, inherently lacking the concept of +statements, have a visual problem: their expressions grow best +horizontally, and often besiege the right margin. + +Hoon has two syntax modes: "tall," which grows vertically, and +"wide," which grows horizontally. Tall code can contain wide +code, but not vice versa. In general, any twig can be written in +either mode. + +Consider the twig `[%tsls a b]`, where `a` and `b` are twigs. +`=+` defines a variables, roughly equivalent to Lisp's `let`. `a` +defines the variable for use in `b`. In wide mode it looks like +an expression: `=+(a b)`. In tall mode, the separator is two +spaces or a newline, without parentheses: + + =+ a + b + +which is the same code, but looks like a "statement." The +deindentation is reasonable because even though `b` is, according +to the AST, "inside" the `=+`, it's more natural to see it +separated out. + +For twigs with a constant number of components, like +`[%tsls a b]`, tall mode relies on the parser to stop itself. +With a variable fanout we need a `==` terminator: + + :* a + b + == + +The indentation rules of constant-fanout twigs are unusual. +Consider the stem `?:`, which happens to have the exact same +semantics as C `?:` (if-then-else). Where in wide form we'd write +`?:(a b c)`, in tall form the convention is + + ?: a + b + c + +This is *backstep* indentation. Its motivation: defend the right +margin. Ideally, `c` above is a longer code block than `a` or +`b`. And in `c`, we have not lost any margin at all. Ideally, we +can write an arbitrarily tall and complex twig that always grows +vertically and never has a margin problem. Backstep indentation +is tail call optimization for syntax. + +Finally, the wide form with rune and parentheses (like `=+(a b)`) +is the *normal* wide form. Hoon has a healthy variety of +*irregular* wide forms, for which no principles at all apply. But +using normal forms everywhere would get quite cumbersome. + +`I4`: Arvo +---------- + +`I4` is the source code for Arvo: about 600 lines of Hoon. 
This +is excessive for what Arvo does and can probably be tightened. Of +course, it does not include the kernel modules (or *vanes*). + +`I4` formally produces the kernel step function, `step` for `I0`. +But in a practical computer, we don't run `I0`, and there are +other things we want to do besides `step` it. Inside `step` is +the Arvo core (leg 7, context), which does the actual work. + +Arvo's arms are `keep`, `peek`, `poke`, `wish`, `load`. + +`wish` compiles a Hoon string. `keep` asks Arvo when it wants to +be woken up next. `peek` exposes the Arvo namespace. These three +are formally unnecessary, but useful in practice for the Unix +process that runs Arvo. + +The essential Arvo arm is `poke`, which produces a gate which +takes as arguments the current time and an event. (Urbit time is +a 128-bit atom, starting before the universe and ending after it, +with 64 bits for subseconds and 64 bits for seconds, ignoring any +post-2015 leap seconds.) + +The product of the `poke` gate is a cell `[action-list Arvo]`. So +Arvo, poked with the time and an event, produces a list of +actions and a new Arvo state. + +Finally, `load` is used to replace the Arvo core. When we want to +change Hoon or Arvo proper, we build a new core with our new +tools, then start it by passing `load` the old core's state. +(State structures in the new core need not match the old, so the +new core may need adapter functions.) `load` produces its new +core wrapped in the canonical outer `step` function. + +Since the language certainly needs to be able to change, the +calling convention to the next generation is a Nock convention. +`load` is leg `54` of the core battery, producing a gate whose +formal argument at leg `6` is replaced with the Arvo state, then +applied to the formula at leg `2`. Of course, future cores can +use a different upgrade convention. Nothing in Arvo or Urbit is +technically frozen, except for Nock. + +State +----- + +Arvo has three pieces of dynamic state: its network address, or +*plot*; an entropy pool; the kernel modules, or *vanes*. + +Configuration overview +---------------------- + +The first main-sequence event, `I5`, is `[%init plot]`, with some +unique Urbit address (*plot*) whose secrets this urbit controls. +The plot, an atom under 2\^128, is both a routing address and a +cryptographic name. Every Urbit event log starts by setting its +own unique and permanent plot. + +From `I6` we begin to install the vanes: kernel modules. Vanes +are installed (or reloaded) by a `[%veer label code]` event. Vane +labels by convention are cords with zero or one letter. + +The vane named `%$` (zero, the empty string) is the standard +library. This contains library functions that aren't needed in +the Hoon kernel, but are commonly useful at a higher level. + +The standard library is compiled with the Hoon kernel (`I2`) as +subject. The other vanes are compiled with the standard library +(which now includes the kernel) as subject. Vane `%a` handles +networking; `%c`, storage; `%f`, build logic; `%g`, application +lifecycle; `%e`, http interface; `%d`, terminal interface; `%b`, +timers; and `%j`, storage of secrets. + +Mechanics +--------- + +Vanes are stored as a cell of a noun with its span. At least in +Urbit, there is no such thing as a dynamic type system - only a +static type system executed at runtime. + +Storing vanes with their spans has two benefits. One, it lets us +type-check internal event dispatch. Two, it lets us do typed +upgrades when we replace a vane. 
It makes routine dispatch operations incur compiler cost, but this caches well.

Input and output
----------------

From the Unix process's perspective, Arvo consumes events (which Unix generates) and produces actions (which Unix executes). The most common event is hearing a UDP packet; the most common action is sending a UDP packet. But Arvo also interacts (still in an event-driven pattern) with other Unix services, including HTTP, the console, and the filesystem.

Events and actions share the `ovum` structure. An ovum is a cell `[wire card]`. A wire is a path - a list of cords, like `[%foo %bar %baz ~]`, usually written with the irregular syntax `/foo/bar/baz`. A card is a tagged union of all the possible types of events and effects.

A wire is a *cause*. For example, if the event is an HTTP request `%thus`, the wire will contain (printed to cords) the server port that received the request, the IP and port of the caller, and the socket file descriptor.

The data in the wire is opaque to Urbit. But an Urbit `poke`, in response to this event or to a later one, can answer the request with an action `%this` - an HTTP response. Unix parses the action wire it sent originally and responds on the right socket file descriptor.

Moves and ducts
---------------

Event systems are renowned for "callback hell". As a purely functional environment, Arvo can't use callbacks with shared state mutation, à la node. But this is not the only common pitfall in the event landscape.

A single-threaded, nonpreemptive event dispatcher, like node or Arvo, is analogous to a multithreaded preemptive scheduler in many ways. In particular, there's a well-known duality between event flow and control flow.

One disadvantage of many event systems is unstructured event flow, often amounting to "event spaghetti". Indeed, the control-flow dual of an unstructured event system is `goto`.

Arvo is a structured event system whose dual is a call stack. An internal Arvo event is called a "move". A move has a duct, which is a list of wires, each of which is analogous to a stack frame. Moves come in two kinds: a `%pass` move calls upward, pushing a frame onto the duct, while a `%give` move returns a result downward, popping a frame off the duct.

`%pass` contains a target vane name; a wire, which is a return pointer that defines this move's cause within the source vane; and a card, the event data. `%give` contains just the card.

The product of a vane event handler is a cell `[moves new-state]`, a list of moves and the vane's new state.

On receiving a Unix event, Arvo turns it into a `%pass` by picking a vane from the Unix wire (which is otherwise opaque), then pushes it on a move stack.

The Arvo main loop pops a move off the move stack, dispatches it, replaces the result vane state, and pushes the new moves on the stack. The Unix event terminates when the stack is empty.

To dispatch a `%pass` move sent by vane `%x`, to vane `%y`, with wire `/foo/bar`, duct `old-duct`, and card `new-card`, we pass `[[/x/foo/bar old-duct] new-card]`, a `[duct card]` cell, to the `call` method on vane `%y`. In other words, we push the given wire (adding the vane name onto the front) onto the old duct to create the new duct, and then pass that along with the card to the other vane.

To dispatch a `%give` move returned by vane `%x`, we check if the duct is only one wire deep. If so, return the card as an action to Unix.
If not, pull the original calling vane from the top wire +on the duct (by destructuring the duct as +`[[vane wire] plot-duct]`), and call the `take` method on `vane` +with `[plot-duct wire card]`. + +Intuitively, a pass is a service request and a give is a service +response. The wire contains the information (normalized to a list +of strings) that the caller needs to route and process the +service result. The effect always gets back to its cause. + +A good example of this mechanism is any internal service which is +asynchronous, responding to a request in a different *system* +event than its cause - perhaps many seconds later. For instance, +the `%c` vane can subscribe to future versions of a file. + +When `%c` gets its service request, it saves it with the duct in +a table keyed by the subscription path. When the future version +is saved on this path, the response is sent on the duct it was +received from. Again, the effect gets back to its cause. +Depending on the request, there may even be multiple responses to +the same request, on the same duct. + +In practice, the structures above are slightly simplified - Arvo +manages both vanes and moves as vases, `[span noun]` cells. Every +dispatch is type-checked. + +One card structure that Arvo detects and automatically unwraps is +`[%meta vase]` - where the vase is the vase of a card. `%meta` +can be stacked up indefinitely. The result is that vanes +themselves can operate internally at the vase level - dynamically +executing code just as Arvo itself does. + +The `%g` vane uses `%meta` to expose the vane mechanism to +user-level applications. The same pattern, a core which is an +event transceiver, is repeated across four layers: Unix, Arvo, +the `%g` vane, and the `:dojo` shell application. + +Event security +-------------- + +Arvo is a "single-homed" OS which defines one plot, usually with +the label `our`, as its identity for life. All vanes are fully +trusted by `our`. But internal security still becomes an issue +when we execute user-level code which is not fully trusted, or +work with data which is not trusted. + +Our security model is causal, like our event system. Every event +is a cause and an effect. Event security is all about *who/whom*: +*who* (other than us) caused this event; *whom* (other than us) +it will affect. `our` is always authorized to do everything and +hear anything, so strangers alone are tracked. + +Every event has a security `mask` with two fields `who` and +`hum`, each a `(unit (set plot))`. (A `unit` is the equivalent of +Haskell's `Maybe` - `(unit x`) is either `[~ x]` or `~`, where +`~` is nil.) + +If `who` is `~`, nil, anyone else could have caused this move -- +in other words, it's completely untrusted. If `who` is `[~ ~]`, +the empty set, no one else caused this move -- it's completely +trusted. Otherwise, the move is "tainted" by anyone in the set. + +If `hum` is `~`, nil, anyone else can be affected by this move -- +in other words, it's completely unfiltered. If `hum` is `[~ ~]`, +the empty set, no one else can hear the move -- it's completely +private. Otherwise, the move is permitted to "leak" to anyone in +the set. + +Obviously, in most moves the security mask is propagated from +cause to effect without changes. It's the exceptions that keep +security exciting. + +Namespace +--------- + +Besides `call` and `take`, each vane exports a `scry` gate whose +argument is a `path` - a list of strings, like a wire. 
+ +`scry` implements a global monotonic namespace - one that (a) +never changes its mind, for the lifetime of the urbit; and (b) +never conflicts across well-behaved urbits. + +This invariant is semantic - it's not enforced by the type +system. Getting it right is up to the vane's developer. +Installing kernel modules is always a high-trust operation. + +The product of the `scry` gate is `(unit (unit value))`. + +So `scry` can produce `~`, meaning the path is not yet bound; +`[~ ~]`, meaning the path is bound to empty; or `[~ ~ value]`, an +actual value is bound. This value is of the form +`[mark span noun]`, where `span` is the type of the noun and +`mark` is a higher-level "type" label best compared to a MIME +type or filename extension. Marks are discussed under the `%f` +vane. + +Vane activation and namespace invariant +--------------------------------------- + +The vane that Arvo stores is not the core that exports `call` +etc, but a gate that produces the core. The argument to this gate +is a cell `[@da $+(path (unit (unit value)))]`. Note that +`$+(path (unit (unit value)))` is just the function signature of +the `scry` namespace. + +So, the head of the argument is the current time; the tail is the +system namespace. The general namespace is constructed from the +`scry` method of the vanes. The first letter of the head of the +path identifies the vane; the rest of the head, with the rest of +the path, is passed to the vane. The vane core is activated just +this way for all its methods, including `scry` itself. + +This answers the question of how to expose dynamic state without +violating our referential transparency invariants. If we require +the query to include exactly the current time (which is +guaranteed to change between events) in the path, then we are +able to respond with the current state of the dynamic data. If +the path doesn't contain the current time, then fail with +`[~ ~]`. This technique is only needed, of course, when you don't +know what the state was in the past. + +Additionally, since each vane knows the plot it's on, a `scry` +query with the plot in the path is guaranteed to be globally +unique. + +But vanes do not have to rely on these safe methods to maintain +monotonicity - for instance, the `%c` revision-control vane binds +paths around the network and across history. + +### A tour of the vanes + +The vanes are separately documented, because that's the entire +point of vanes. Let's take a quick tour through the system as it +currently stands, however. + +### `%a` `%ames`: networking + +`%a` is the network vane, currently `%ames` (2000 lines). `%ames` +implements an encrypted P2P network over UDP packets. + +As a vane, `%ames` provides a simple `%pass` service: send a +message to another plot. The message is an arbitrary +`[wire noun]` cell: a message channel and a message body. `%ames` +will respond with a `%give` that returns either success or +failure and an error dump. + +`%ames` messages maintain causal integrity across the network. +The sender does not send the actual duct that caused the message, +of course, but an opaque atom mapped to it. This opaque `bone`, +plus the channel itself, are the "socket" in the RFC sense. + +Messages are ordered within this socket and delivered exactly +once. (Yes, as normally defined this is impossible. Urbit can do +exactly-once because `%ames` messaging is not a tool to build a +consistency layer, but a tool built on the consistency layer. +Thus, peers can have sequence numbers that are never reset.) 
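
A sketch of the exactly-once idea, in Python rather than `%ames` (the class and method names are ours): because the receiver's per-socket sequence counter lives in the same durable state as everything else, it never resets, so duplicates can be dropped and gaps held back indefinitely.

    class Socket:
        """One ordered message channel from one peer (a toy model)."""
        def __init__(self):
            self.next_seq = 0          # sequence number expected next; never resets
            self.pending = {}          # out-of-order messages, keyed by sequence

        def accept(self, seq, msg, deliver):
            if seq < self.next_seq:
                return                 # duplicate of something already delivered
            self.pending[seq] = msg
            while self.next_seq in self.pending:
                deliver(self.pending.pop(self.next_seq))
                self.next_seq += 1
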
+ +More subtly, local and remote causes treated are exactly the same +way, improving the sense of network abstraction. CORBA taught us +that it's not possible to abstract RPC over local function calls +without producing a leaky abstraction. The Arvo structured event +model is designed to abstract over messages. + +One unusual feature is that `%ames` sends end-to-end acks. +Acknowledging the last packet received in a message acknowledges +that the message itself has been fully accepted. There is no +separate message-level result code. Like its vane interface, +`%ames` acks are binary: a positive ack has no data, a negative +ack has a dump. Thus, Arvo is transactional at the message level +as well as the packet/event level. + +Another `%ames` technique is dropping packets. For instance, it +is always a mistake to respond to a packet with bad encryption: +it enables timing attacks. Any packet the receiver doesn't +understand should be dropped. Either the sender is deranged or +malicious, or the receiver will receive a future upgrade that +makes a later retransmission of this packet make sense. + +Messages are encrypted in a simple PKI (currently RSA, soon +curve/ed25519) and AES keys are exchanged. Certificate transfer +and key exchange are aggressively piggybacked, so the first +packet to a stranger is always signed but not encrypted. The idea +is that if you've never talked to someone before, probably the +first thing you say is "hello." Which is not interesting for any +realistic attacker. In the rare cases where this isn't true, it's +trivial for the application to work around. (See the network +architecture section for more about the PKI approach.) + +(For packet geeks only: `%ames` implements both "STUN" and "TURN" +styles of peer-to-peer NAT traversal, using parent plots (see +below) as supernodes. If the sending plot lacks a current IP/port +(Urbit uses random ports) for the destination, it forwards across +the parent hierarchy. As we forward a packet, we attach the +sender's address, in case we have a full-cone NAT that can do +STUN. + +When these indirect addresses are received, they are regarded +with suspicion, and duplicate packets are sent - one forwarded, +one direct. Once a direct packet is received, the indirect +address is dropped. Forwarding up the hierarchy always succeeds, +because galaxies (again, see below) are bound into the DNS at +`galaxy.urbit.org`.) + +### `%c` `%clay`: filesystem + +`%c` is the filesystem, currently `%clay` (3000 lines). `%clay` +is a sort of simplified, reactive, typed `git`. + +The `%clay` filesystem uses a uniform inode structure, `ankh`. An +ankh contains: a Merkle hash of this subtree, a set of named +children, and possibly file data. If present, the file data is +typed and marked (again, see the `%f` vane for more about marked +data). + +A path within the tree always contains the plot and desk +(lightweight branch) where the file is located, which version of +the file to use, and what path within the desk the file is at. + +Paths can be versioned in three ways: by change number (for +changes within this desk); by date; or by label. + +Where `git` has multiple parallel states, `%clay` uses separate +desks. For instance, where `git` creates a special merge state, +`%clay` just uses a normal scratch desk. Don't detach your head, +merge an old version to a scratch desk. Don't have explicit +commit messages, edit a log file. `%clay` is a RISC `git`. 
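
The ankh described above is easy to model. The sketch below is Python, not Hoon, and the field names are ours; the point is the shape: a subtree hash, named children, and an optional marked file.

    import hashlib
    from dataclasses import dataclass, field
    from typing import Optional

    @dataclass
    class Ankh:
        children: dict = field(default_factory=dict)   # name -> Ankh
        file: Optional[tuple] = None                   # (mark, data) if a file lives here

        def merkle(self) -> str:
            """Hash of this node's file data and all child subtree hashes."""
            h = hashlib.sha256(repr(self.file).encode())
            for name in sorted(self.children):
                h.update(name.encode())
                h.update(self.children[name].merkle().encode())
            return h.hexdigest()
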
+ +`%clay` is a typed filesystem; not only does it save a type with +each file, but it uses `mark` services from `%f` to perform typed +diff and patch operations that match the mark types. Obviously, +Hunt-McIlroy line diff only works for text files. `%clay` can +store and revise arbitrary custom data structures, not a +traditional revision-control feature. + +The `scry` namespace exported by clay uses the mode feature to +export different views of the filesystem. For instance, +`[%cx plot desk version path]`, which passes `%x` as the mode to +clay's `scry`, produces the file (if any) at that path. `%y` +produces the directory, essentially - the whole ankh with its +subtrees trimmed. `%z` produces the whole ankh. + +`%clay` is reactive; it exports a subscription interface, with +both local and network access. A simple use is waiting for a file +or version which has not yet been committed. + +A more complex use of `%clay` subscription is synchronization, +which is actually managed at the user level within a `%gall` +application (`:hood`). It's straightforward to set up complex, +multistep or even cyclical chains of change propagation. +Intuitively, `git` only pulls; `%clay` both pulls and pushes. + +Finally, the easiest way to use `%clay` is to edit files directly +from Unix. `%clay` can mount and synchronize subtrees in the +Urbit home directory. There are two kinds of mount: export and +sync. While an export mount marks the Unix files read-only, sync +mounts support "dropbox" style direct manipulation. The Unix +process watches the subtree with `inotify()` or equivalent, and +generates filesystem change events automagically. A desk which is +synced with Unix is like your `git` working tree; a merge from +this working desk to a more stable desk is like a `git` commit. + +Access control in `%clay` is an attribute of the desk, and is +either a blacklist or whitelist of plots. Creating and +synchronizing desks is easy enough that the right way to create +complex access patterns is to make a desk for the access pattern, +and sync only what you want to share into it. + +### `%f` `%ford`: builder + +`%f` is the functional build system, currently `%ford` (1800 +lines). It might be compared to `make` et al, but loosely. + +`%ford` builds things. It builds applications, resources, content +type conversions, filter pipelines, more or less any functional +computation specified at a high level. Also, it caches repeated +computations and exports a dependency tracker, so that `%ford` +users know when they need to rebuild. + +`%ford` does all its building and execution in the virtual Nock +interpreter, `mock`. `mock` is a Nock interpreter written in +Hoon, and of course executed in Nock. (See the implementation +issues section for how this is practical.) + +`mock` gives us two extra affordances. One, when code compiled +with tracing hints crashes deterministically, we get a +deterministic stack trace. Two, we add a magic operator `11` +(`.^` in Hoon) which dereferences the global namespace. This is +of course referentially transparent, and `mock` remains a +functional superset of Nock. + +`%ford` is passed one card, `[%exec silk]`, which specifies a +computation - essentially, a makefile as a noun. The `silk` +structure is too large to enumerate here, but some highlights: + +The simplest task of `%ford` is building code, by loading sources +and resources from `%clay`. For kernel components, a source file +is just parsed directly into a `twig`. `%ford` can do a lot of +work before it gets to the `twig`. 
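
To show what `mock` adds to plain Nock, here is a schematic dispatcher in Python; `nock` is an assumed ordinary interpreter, `sky` is the namespace function, and `Block` is our label for a query with no answer yet - all names are illustrative, not Urbit's.

    class Block(Exception):
        """The namespace has no answer for this path yet."""

    def mock(subject, formula, sky, nock):
        # (11, sub) is the one extension: evaluate `sub` to get a path,
        # then dereference the global namespace at that path.
        if isinstance(formula, tuple) and formula[0] == 11:
            path = mock(subject, formula[1], sky, nock)
            value = sky(path)
            if value is None:
                raise Block(path)      # the build must wait for this resource
            return value
        return nock(subject, formula)  # everything else is ordinary Nock
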
+ +The Hoon parser for a `%ford` build actually parses an extended +build language wrapped around Hoon. Keeping the build +instructions and the source in one file precludes a variety of +exciting build mishaps. + +For any interesting piece of code, we want to include structures, +libraries, and data resources. Some of these requests should be +in the same `beak` (plot/desk/version) as the code we're +building; some are external code, locked to an external version. + +`%ford` loads code from `%clay` with a *role* string that +specifies its use, and becomes the head of the spur. (Ie, the +first path item after plot, desk and version.) Roles: structures +are in `/sur`, libraries in `/lib`, marks in `/mar`, applications +in `/app`, generators in `/gen`, fabricators in `/fab`, filters +in `/tip`, and anything not a Hoon file in `/doc`. + +Data resources are especially interesting, because we often want +to compile whole directory trees of resources into a single noun, +a typed constant from the point of view of the programmer. Also, +requests to `%clay` may block - `%ford` will have to delay giving +its result, and wait until a subscription for the result returns. + +Moreover, for any interesting build, the builder needs to track +dependencies. With every build, `%ford` exports a dependency key +that its customers can subscribe to, so that they get a +notification when a dependency changes. For example, `%gall` uses +this feature to auto-reload applications. + +Finally, both for loading internal resources in a build and as a +vane service (notably used by the web vane `%eyre`), `%ford` +defines a functional namespace which is mapped over the static +document space in `/doc`. + +If we request a resource from `%ford` that does not correspond to +a file in `/doc`, we search in `/fab` for the longest matching +prefix of the path. The fabricator receives as its sole argument +that part of the path not part of the matching prefix. + +Thus, if we search for `/a/b/c/d`, and there's no `/doc/a/b/c/d`, +then we first check for `/fab/a/b/c/d`, then `/fab/a/b/c`, and so +on. If we find, for example, a `/fab/a/b`, then we run that +fabricator with the argument `/c/d`. Thus, a fabricator can +present an arbitrary virtual document tree. + +The `%ford` vane also exports its namespace through `scry`, but +`scry` has no way to block if a computation waits. It will just +produce `~`, binding unknown. + +Another major `%ford` feature is the mark system. While the Hoon +type system works very well, it does not fill the niche that MIME +types fill on the Internets. Marks are like MIME types, if MIME +types came with with executable specifications which defined how +to validate, convert, diff, and patch content objects. + +The mark `%foo` is simply the core built from `/mar/foo`. If +there is no `/mar/foo`, all semantics are defaulted - `%foo` is +treated as `%noun`. If there is a core, it can tell `%ford` how +to validate/convert from other marks (validation is always +equivalent to a conversion from `%noun`, and seldom difficult, +since every Hoon structure is defined as a fixpoint normalizer); +convert *to* other marks; patch, diff, and merge. Obviously +`%clay` uses these revision control operations, but anyone can. + +For complex conversions, `%ford` has a simple graph analyzer that +can convert, or at least try to convert, any source mark to any +target mark. + +There are two ford request modes, `%c` and `%r` - cooked and raw. +A cooked request, `%fc` in `scry`, has a target mark and converts +the result to it. 
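
The prefix search just described is short enough to write out. This is a Python sketch with made-up containers (`docs` for static files, `fabs` for fabricators), not `%ford`'s actual interface:

    def resolve(path, docs, fabs):
        if path in docs:                        # a static document wins outright
            return docs[path]
        for cut in range(len(path), 0, -1):     # longest matching prefix first
            prefix, rest = path[:cut], path[cut:]
            if prefix in fabs:
                return fabs[prefix](rest)       # run the fabricator on the remainder
        return None                             # unbound

    fabs = {("a", "b"): lambda rest: "made /" + "/".join(rest)}
    print(resolve(("a", "b", "c", "d"), {}, fabs))   # made /c/d
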
A raw request, `%fr`, returns whatever `%ford` +finds easiest to make. + +Generators and filters, `/gen` and `/tip`, are actually not used +by any other vane at present, but only by the user-level `:dojo` +app, which constructs dataflow calculations in Unix pipe style. +Applications, `/app`, are used only by the `%g` vane. + +### `%g` `%gall`: application driver + +`%g` is the application system, currently `%gall` (1300 lines) +`%gall` is half process table and half systemd, sort of. + +`%gall` runs user-level applications much as Arvo runs vanes. Why +this extra layer? Vanes are kernel components written by kernel +hackers, for whom expressiveness matters more than convenience. A +bad kernel module can disable the OS; a bad application has to be +firewalled. + +But the basic design of an application and a vane is the same: a +stateful core that's called with events and produces moves. Only +the details differ, and there are too many to cover here. + +One example is that `%gall` apps don't work with Arvo ducts +directly, but opaque integers that `%gall` maps to ducts; the +consequences of a bad app making a bad duct would be odd and +potentially debilitating, so we don't let them touch ducts +directly. Also, `%gall` does not like to crash, so to execute +user-level code it uses the virtual interpeter `mock`. At the +user level, the right way to signal an error is to crash and let +`%gall` pick up the pieces - in a message handler, for instance, +this returns the crash dump in a negative ack. + +`%gall` uses `%ford` to build cores and track dependencies. Like +vanes, `%gall` applications update when new code is available, +sometimes using adapter functions to translate old state types to +new ones. Unlike Arvo, `%gall` triggers updates automatically +when a dependency changes. + +Other than convenience and sanity checks, the principal value add +of `%gall` is its inter-application messaging protocol. `%gall` +messaging supports two communication patterns: a one-way message +with a success/error result (`%poke`), and classic publish and +subscribe (`%peer`). + +`%peer` is designed for synchronizing state. The subscriber +specifies a path which defines an application resource, and +receives a stream of `%full` and `%diff` cards, total and +incremental updates. (A simple "get" request is a special case in +which there is a single full and no diffs.) Backpressure breaks +the subscription if the queue of undelivered updates grows too +deep. Broken subscriptions should not cause user-level errors; +the user should see an error only if the subscription can't be +reopened. + +Recall that `%ames` routes messages which are arbitrary nouns +between vanes on different urbits. `%gall` uses these `%ames` +messages for remote urbits, and direct moves for local IPC. Thus, +from the app's perspective, messaging an app on another Urbit is +the same as messaging another app on the same Urbit. The +abstraction does not leak. + +Messages are untyped at the `%ames` level, but all `%gall` +messages carry a mark and are validated by the receiver. Marks +are not absolute and timeless - `%ford` needs a `beak` (plot, +desk, version) to load the mark source. + +When a message fails to validate, the packet is dropped silently. +This puts the sender into exponential backoff retransmission. 
The +general cause of a message that doesn't validate is that the +sender's mark has received a backward-compatible update (mark +updates that aren't backward compatible are a bad idea - use a +new name), and the receiver doesn't have this update yet. The +retransmitted packet will be processed correctly once the update +has propagated. + +With this mechanism, Urbit can update a distributed system (such +as our own `:talk` network), upgrading both applications and +protocols, silently without user intervention or notification. +The general pattern of application distribution is that the app +executes from a local desk autosynced to a remote urbit, which +performs the function of an app store or distro. As this server +fills its subscriptions, a period of network heterogeneity is +inevitable; and so is transient unavailability, as new versions +try to talk to old ones. But it resolves without hard errors as +all clients are updated. + +### `%e` `%eyre`: web server/client + +`%e` is the web system, currently `%eyre` (1600 lines). + +`%eyre` has three purposes. First, it is an HTTP and HTTPS +client - or rather, it interfaces via actions/events to HTTP +client handlers in the Unix layer. + +Second, `%eyre` serves the `%ford` namespace over HTTP. The URL +extension is parsed as a mark, but the default mark for requests +is `urb`, which creates HTML and injects an autoupdate script +that long-polls on the dependencies, reloading the page in the +background when they change. + +The normal way of publishing static or functional content in +Urbit is to rely on `%ford` for format translation. Most static +content is in markdown, the `%md` mark. Dynamic content is best +generated with the `sail` syntax subsystem in Hoon, which is is +essentially an integrated template language that reduces to XML. + +Third, `%eyre` acts as a client for `%gall` apps, translating the +`%gall` message flow into JSON over HTTP. `%poke` requests become +POST requests, `%peer` becomes a long-poll stream. Our `urb.js` +framework is a client-side wrapper for these requests. + +The abstraction does not leak. On the server side, all it takes +to support web clients is ensuring that outbound marks print to +`%json` and inbound marks parse from `%json`. (The standard +library includes JSON tools.) The `%gall` application does not +even know it's dealing with a web client, all it sees are +messages and subscriptions, just like it would receive from +another Urbit app. + +The dataflow pattern of `%gall` subscriptions is ideal for React +and similar "one-way data binding" client frameworks. + +Of course, `%gall` apps expect to be talking to an urbit plot, so +web clients need to (a) identify themselves and (b) talk over +HTTPS. `%eyre` contains a single sign-on (SSO) flow that +authenticates an urbit user either to her own server or her +friends'. + +Ideally, an urbit named `~plot` is DNSed to `plot.urbit.org`. If +you use your urbit through the web, you'll have an insecure +cookie on `*.urbit.org`. Other urbits read this and drive the SSO +flow; if you're `~tasfyn-partyv`, you can log in as yourself to +`~talsur-todres.urbit.org`. The SSO confirmation message is sent +directly as an `%ames` message between the `%eyre` vanes on the +respective urbits. + +Urbit also has an nginx configuration and node cache server, +which (a) let a relatively slow Urbit server drive reasonably +high request bandwidth, and (b) serve HTTPS by proxy. 
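
From the browser's side, the mapping described above is simple enough to sketch in a few lines. This is Python with hypothetical URL shapes and field names - not `%eyre`'s real endpoints or the `urb.js` API - just the poke-as-POST, peer-as-long-poll pattern:

    import json, urllib.request

    BASE = "http://localhost:8080"            # a local urbit's web listener

    def poke(app, mark, data):
        """One %poke: a single POST whose body is the marked JSON payload."""
        req = urllib.request.Request(
            f"{BASE}/poke/{app}/{mark}",      # hypothetical path
            json.dumps(data).encode(),
            {"Content-Type": "application/json"})
        return json.load(urllib.request.urlopen(req))

    def peer(app, path, on_update):
        """One %peer: a long-poll loop; each response is a %full or %diff."""
        seq = 0
        while True:
            url = f"{BASE}/peer/{app}{path}?seq={seq}"   # hypothetical path
            with urllib.request.urlopen(url) as r:
                on_update(json.load(r))
            seq += 1
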
+ +Note that while external caching helps `%ford` style functional +publishing, it does not help `%gall` style clients, which use +server resources directly. Urbit is a personal server; it can +handle an HN avalanche on your blog, but it's not designed to +power your new viral startup. + +### `%j` `%jael`: secret storage + +`%j`, currently `%jael` (200 lines), saves secrets in a tree. +Types of secrets that belong in `%jael`: Urbit private keys, +Urbit symmetric keys, web API user keys and/or passwords, web API +consumer (application) keys. + +`%jael` has no fixed schema and is simply a classic tree +registry. Besides a simpler security and type model, the main +difference between secrets and ordinary `%clay` data is that +secrets expire - sometimes automatically, sometimes manually. +When another vane uses a `%jael` secret, it must register to +receive an expiration notice. + +### `%d` `%dill`: console and Unix + +`%d`, currently `%dill` (450 lines) handles the terminal and +miscellaneous Unix interfaces, mainly for initialization and +debugging. + +The console terminal abstraction, implemented directly with +`terminfo` in raw mode, gives Urbit random-access control over +the input line. Keystrokes are not echoed automatically. Output +lines are write-only and appear above the input line. + +`%dill` also starts default applications, measures system memory +consumption, dumps error traces, etc. + +### `%b` `%behn`: timers + +`%b`, currently `%behn` (250 lines), is a timer vane that +provides a simple subscription service to other vanes. + +Base applications +----------------- + +Arvo ships with three major default applications: `:hood` (1600 +lines), `:dojo` (700 lines), and `:talk` (1600 lines). + +### `:dojo`: a shell + +`:dojo` is a shell for ad-hoc computation. A dojo command is a +classic filter pipeline, with sources, filters, and sinks. + +Sources can be shell variables, `%clay` files, immediate +expressions, source files in `/gen`, or uploads (Unix files in +`~/urbit/$plot/.urb/get`). There are three kinds of generator: +simple expressions, dialog cores, and web scrapers. Generators +are executed by `%ford` through `mock`, and can read the Urbit +namespace via virtual operator `11`. + +Urbit does not use short-lived applications like Unix commands. A +`%gall` application is a Unix daemon. A generator is not like a +Unix process at all; it cannot send moves. A dialog generator, +waiting for user input, does not talk to the console; it tells +`:dojo` what to say to the console. A web scraper does not talk +to `%eyre`; it tells `:dojo` what resources it needs (GET only). +This is POLA (principle of least authority); it makes the command +line less powerful and thus less scary. + +`:dojo` filters are immediate expressions or `/tip` source files. +Sinks are console output, shell variables, `%clay`, or downloads +(Unix files in `~/urbit/$plot/.urb/put`). (The upload/download +mechanism is a console feature, not related to `%clay` sync - +think of browser uploading and downloading.) + +### `:talk`: a communication bus + +`:talk` is a distributed application for sharing *telegrams*, or +typed social messages. Currently we use `:talk` as a simple chat +service, but telegram distribution is independent of type. + +Every urbit running `:talk` can create any number of "stations." +Every telegram has a unique id and a target audience, to which +its sender uploads it. But `:talk` stations also subscribe to +each other to construct feeds. 
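
A toy model in Python of the id-plus-audience distribution just described (the class and field names are ours, not `:talk`'s real structure): because every telegram carries a unique id, stations can subscribe to one another and redeliver freely without duplicating anything, even when the subscriptions form cycles.

    class Station:
        def __init__(self):
            self.telegrams = {}        # unique id -> telegram
            self.subscribers = set()   # other stations mirroring this one

        def post(self, tid, telegram):
            if tid in self.telegrams:
                return                 # unique ids make redelivery harmless
            self.telegrams[tid] = telegram
            for sub in self.subscribers:
                sub.post(tid, telegram)
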
Mutually subscribing stations will mirror each other, though without preserving message order. A `:talk` station is not a decentralized entity; but urbits can use subscription patterns to federate into "global" stations - very much as in NNTP (Usenet).

Stations have a flexible admission control model, with a blanket policy mode and an exception list, that lets them serve as "mailboxes" (public write), "journals" (public read), "channels" (public read/write), or "chambers" (private read/write).

Subscribers are also notified of presence and configuration changes, typing indicators, etc. Also, telegrams contain a "flavor", which senders can use to indicate categories of content that other readers may prefer not to see.

`:talk` is usable both as a command-line application and as a reactive web client.

### `:hood`: a system daemon

`:hood` is a classic userspace system daemon, like Unix `init`. It's a system component, but it's in userspace because it can be.

`:hood` is actually a compound application made of three libraries, `/helm`, `/drum`, and `/kiln`. `/helm` manages the PKI; `/drum` multiplexes the console; `/kiln` controls `%clay` sync.

`/drum` routes console activity over `%gall` messages, and can connect to both local and foreign command-line interfaces - ssh, essentially.

One pattern in Urbit console input is that an application doesn't just parse its input after a line is entered, but on each character. This way, we can reject or correct syntax errors as they happen. It's a much more pleasant user experience.

But since both sides of a conversation (the user and the application) are making changes to a single shared state (the input line), we have a reconciliation problem. The Urbit console protocol uses operational transformation (like Google Wave or `git rebase`) for eventual consistency.

`/helm` manages public keys and (in future) hosted urbits. See the network architecture section below.

`/kiln` implements mirroring and synchronization. Any desk can mirror any other desk (given permission). Mirrors can form cycles -- a two-way mirror is synchronization.

Implementation issues
---------------------

We've left a couple of knotty implementation issues unresolved up until now. Let's resolve them.

### Jets

How can an interpreter whose only arithmetic operator is increment compute efficiently? For instance, the only way to decrement `n` is to count up to `n - 1`, which is O(n).

Obviously, the solution is: a sufficiently smart optimizer.

A sufficiently smart optimizer doesn't need to optimize every Nock formula that could calculate a decrement function. It only needs to optimize one: the one we actually run.

The only one we run is the one compiled from the decrement function `dec` in the Hoon standard library. So there's no sense in which our sufficiently smart optimizer needs to *analyze* Nock formulas to see if they're decrement formulas. It only needs to *recognize* the standard `dec`.

The easiest way to do this is for the standard `dec` to declare, with a hint (Nock `10`), in some formalized way, that it is a decrement formula. The interpreter implementation can check this assertion by simple comparison - it knows what formula the standard `dec` compiles to. Our sufficiently smart optimizer isn't very smart at all!

The C module that implements the efficient decrement is called a "jet."
The jet system should not be confused with an FFI: a jet +has *no* reason to make system calls, and should never be used to +produce side effects. Additionally, a jet is incorrect unless it +accurately duplicates an executable specification (the Hoon +code). Achieving jet correctness is difficult, but we can +spot-check it easily by running both soft and hard versions. + +Jets separate mechanism and policy in Nock execution. Except for +perceived performance, neither programmer nor user has any +control over whether any formula is jet-propelled. A jet can be +seen as a sort of "software device driver," although invisible +integration of exotic hardware (like FPGAs) is another use case. +And jets do not have to be correlated with built-in or low-level +functionality; for instance, Urbit has a markdown parser jet. + +Jets are of course the main fail vector for both computational +correctness and security intrusion. Fortunately, jets don't make +system calls, so sandboxing policy issues are trivial, but the +sandbox transition needs to be very low-latency. Another approach +would be a safe systems language, such as Rust. + +The correctness issue is more interesting, because errors happen. +They are especially likely to happen early in Urbit's history. A +common scenario will be that the host audits an urbit by +re-executing all the events, and produces a different state. In +this case, the urbit must become a "bastard" - logically +instantiated at the current state. The event log is logically +discarded as meaningless. Hosting a bastard urbit is not a huge +problem, but if you have one you want to know. + +In the long run, jet correctness is an excellent target problem +for fuzz testers. A sophisticated implementation might even put +10% of runtime into automatic jet testing. While it's always hard +for implementors to match a specification perfectly, it's much +easier with an executable specification that's only one or two +orders of magnitude slower. + +### Event planning + +How do we actually process Urbit events? If Urbit is a database, +how does our database execute and maintain consistency in +practice, either on a personal computer or a normal cloud server? +How does orthogonal persistence work? Can we use multiple cores? + +An event is a transaction. A transaction either completes or +doesn't, and we can't execute its side effects until we have +committed it. For instance, if an incoming packet causes an +outgoing packet, we can't send the outgoing packet until we've +committed the incoming one to durable storage. + +Unfortunately, saving even a kilobyte of durable storage on a +modern PC, with full write-through sync, can take 50 to 100ms. +Solid-state storage improves this, but the whole machine is just +not designed for low-latency persistence. + +In the cloud the situation is better. We can treat consensus on a +nontrivial Raft cluster in a data center as persistence, even +though the data never leaves RAM. Urbit is highly intolerant of +computation error, for obvious reasons, and should be run in an +EMP shielded data center on ECC memory. + +There are a number of open-source reliable log processors that +work quite well. We use Apache Kafka. + +With either logging approach, the physical architecture of an +Urbit implementation is clear. The urbit is stored in two forms: +an event log, and a checkpoint image (saved periodically, always +between events). The log can be pruned at the last reliably +recorded checkpoint, or not. 
This design (sometimes called "prevalence") is widely used and supported by common tools.

The checkpointing process is much easier because Urbit has no cycles and needs no tracing garbage collector. Without careful tuning, a tracing GC tends to turn all memory available to it into randomly allocated memory soup. The Urbit interpreter uses a region system with a copy step to deallocate the whole region used for processing each event.

Events don't always succeed. How does a functional operating system deal with an infinite loop? The loop has to be interrupted, as always. How this happens depends on the event. If the user causes an infinite loop from a local console event, it's up to the user to interrupt it with `^C`. Network packets have a timeout, currently a minute.

When execution fails, we want a stack trace. But Urbit is a deterministic computer and the stack trace of an interrupt is inherently nondeterministic. How do we square this circle?

Urbit is deterministic, but it's a function of its input. If an event crashes, we don't record the event in `I`. Instead, we record a `%crud` card that contains the stack trace and the failed event. To Urbit, this is simply another external event.

Processing of `%crud` depends on the event that caused it. For keyboard input, we print the error to the screen. For a packet, we send a negative ack on the packet, with the trace. For instance, if you're at the console of urbit `A` and logged in over the network to `B`, and you run an infinite loop, the `B` event loop will time out; the network console message that `A` sent to `B` will return a negative ack; the console application on `A` will learn that its message failed, and print the trace.

Finally, the possibilities of aggressive optimization in event execution haven't been explored. Formally, Urbit is a serial computer - but it's probable that a sophisticated, mature implementation would find a lot of ways to cheat. As always, a logically simple system is the easiest system to hack for speed.

Network and PKI architecture
----------------------------

Two more questions we've left unanswered: how you get your urbit plot, and how packets get from one plot to another.

Again, a plot is both a digital identity and a routing address. Imagine IPv4 if you owned your own address, converted it to a string that sounded like a foreign name, and used it as your Internet handle.

Plots are parsed and printed with the `%p` aura, which is designed to make them as human-memorable as possible. `%p` uses phonemic base-256 and renders smaller plots as shorter strings:

    8 bits     galaxy   ~syd
    16 bits    star     ~delsym
    32 bits    planet   ~mighex-forfem
    64 bits    moon     ~dabnev-nisseb-nomlec-sormug
    128 bits   comet    ~satnet-rinsyr-silsec-navhut--bacnec-todmeb-sarseb-pagmul

Of course, not everyone who thinks he's Napoleon is. For the urbit to actually send and receive messages under the plot it assigns itself in the `%init` event (`I5`), later events must convey the secrets that authenticate it. In most cases this means a public key, though some urbits are booted with a symmetric session key that only works with the parent plot.

How do you get a plot? Comets are hashes of a random public key. "Civil" non-comets are issued by their parent plot - the numerical prefix in the next rank up: moons by planet, planets by star, stars by galaxy. The fingerprints of the initial galactic public keys (currently test keys) are hardcoded in `%ames`.
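
The size-to-rank rule in the table above is mechanical enough to write down. This is a Python illustration (the function and its name are ours; real `%p` printing is more involved):

    def rank(plot: int) -> str:
        """Classify a plot by how many bits it takes to write it."""
        bits = max(plot.bit_length(), 1)
        if bits <= 8:
            return "galaxy"
        if bits <= 16:
            return "star"
        if bits <= 32:
            return "planet"
        if bits <= 64:
            return "moon"
        return "comet"           # up to 128 bits, the hash of a public key

    assert rank(0xab) == "galaxy"
    assert rank(0xabcd) == "star"
    assert rank(0xabcd1234) == "planet"
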
+ +The comet network is fully decentralized and "free as in beer," +so unlikely to be useful. Any network with disposable identities +can only be an antisocial network, because disposable identities +cannot be assigned default positive reputation. Perhaps comets +with nontrivial hashcash in their plots could be an exception, +but nonmemorable plots remain a serious handicap. + +The civil network is "semi-decentralized" - a centralized system +designed to evolve into a decentralized one. Urbit does not use a +blockchain - address space is digital land, not digital money. + +The initial public key of a civil plot is signed by its parent. +But galaxies, stars and planets are independent once they've been +created - they sign their own updates. (Moons are dependent; +their planet signs updates.) + +The certificate or `will` is a signing chain; only the last key +is valid; longer wills displace their prefixes. Thus update is +revocation, and revocation is a pinning race. To securely update +a key is to ensure that the new will reaches any peer before any +messages signed by an attacker who has stolen the old key. + +The engineering details of this race depend on the actual threat +model, which is hard to anticipate. If two competing parties have +the same secret, no algorithm can tell who is in the right. Who +pins first should win, and there will always be a time window +during which the loser can cause problems. Aggressively flooding +and expiring certificates reduces this window; caching and lazy +distribution expands it. There is no tradeoff-free solution. + +Broadly, the design difference between Urbit and a blockchain +network is that blockchains are "trust superconductors" - they +eliminate any dependency on social, political or economic trust. +Urbit is a "trust conductor" - engineered to minimize, but not +eliminate, dependencies on trust. + +For instance, bitcoin prevents double-spending with global mining +costs in multiple dollars per transaction (as of 2015). Trusted +transaction intermediation is an easily implemented service whose +cost is a tiny fraction of this. And the transaction velocity of +money is high; transactions in land are far more rare. + +Another trust engineering problem in Urbit is the relationship +between a plot and its parent hierarchy. The hierarchy provides a +variety of services, starting with peer-to-peer routing. But +children need a way to escape from bad parents. + +Urbit's escape principle: (a) any planet can depend on either its +star's services, or its galaxy's; (b) any star can migrate to any +galaxy; (c) stars and galaxies should be independently and +transparently owned. + +Intuitively, an ordinary user is a planet, whose governor is its +star, and whose appeals court is its galaxy. Since a star or +galaxy should have significant reputation capital, it has an +economic incentive to follow the rules. The escape system is a +backup. And the comet network is a backup to the backup. + +From a privacy perspective, a planet is a personal server; a star +or galaxy is a public service. Planet ownership should be private +and secret; star or galaxy ownership should be public and +transparent. Since, for the foreseeable future, individual +planets have negligible economic value, Urbit is not a practical +money-laundering tool. This is a feature, not a bug. + +Finally, a general principle of both repositories and republics +is that the easier it is (technically) to fork the system, the +harder it is (politically) to fork. 
Anyone could copy Urbit and replace the galactic fingerprint block. Anyone can also fork the DNS. If the DNS were mismanaged badly enough, someone could and would; since it's competently managed, everyone can't and won't. Forkability is an automatic governance corrector.

From personal server to digital republic
----------------------------------------

Republics? Any global system needs a political design. Urbit is designed as a *digital republic*.

The word *republic* is from Latin, "res publica" or "public thing." The essence of republican government is its *constitutional* quality - government by law, not people.

A decentralized network defined by a deterministic interpreter comes as close to a digital constitution as we can imagine. Urbit is not the only member of this set - bitcoin is another; ethereum is even a general-purpose computer.

The "law" of bitcoin and ethereum is self-enforcing; the "law" of Urbit is not. Urbit is not a blockchain, and no urbit can assume that any other urbit is computing the Nock function correctly.

But at least the distinction between correct and incorrect computing is defined. In this sense, Nock is Urbit's constitution. It's not self-enforcing like Ethereum, but it's exponentially more efficient.

Non-self-verifying rules are useful, too. Defining correctness is not enforcing correctness. But Urbit doesn't seek to eliminate its dependency on conventional trust. Correctness precisely defined is easily enforced with social tools: either the computation is tampered with or it isn't.

Conclusion
==========

On the bottom, Urbit is an equation. In the middle it's an operating system. On the top it's a civilization -- or at least, a design for one.

When we step back and look at that civilization, what we see isn't that surprising. It's the present that the past expected. The Internet is what the Arpanet of 1985 became; Urbit is what the Arpanet of 1985 wanted to become.

In 1985 it seemed completely natural and inevitable that, by 2015, everyone in the world would have a network computer. Our files, our programs, our communication would all go through it.

When we got a video call, our computer would pick up. When we had to pay a bill, our computer would pay it. When we wanted to listen to a song, we'd play a music file on our computer. When we wanted to share a spreadsheet, our computer would talk to someone else's computer. What could be more obvious? How else would it work?

(We didn't anticipate that this computer would live in a data center, not on our desk. But we didn't appreciate how easily a fast network can separate the server from its UI.)

2015 has better chips, better wires, better screens. We know what the personal cloud appliance does with this infrastructure. We can imagine our 1985 future ported to it. But the most interesting thing about our planets: we don't know what the world will do with them.

There's a qualitative difference between a personal appliance and a personal server; it's the difference between a social "network" and a social network. A true planet needs to work very hard to make social programming easier. Still, distributed social applications and centralized social applications are just different. A car is not a horseless carriage.

We know one thing about the whole network: by default, a social "network" is a monarchy. It has one corporate dictator, its developer. By default, a true network is a republic; its users govern it.
And more important: a distributed community cannot +coerce its users. Perhaps there are cases where monarchy is more +efficient and effective -- but freedom is a product people want. + +But no product is inevitable. Will we even get there? Will Urbit, +or any planet, or any personal cloud server, actually happen? + +It depends. The future doesn't just happen. It happens, +sometimes. When people work together to make it happen. +Otherwise, it doesn't. + +The Internet didn't scale into an open, high-trust network of +personal servers. It scaled into a low-trust network that we use +as a better modem -- to talk to walled-garden servers that are +better AOLs. We wish it wasn't this way. It is this way. + +If we want the network of the future, even the future of 1985, +someone has to build it. Once someone's built it, someone else +has to move there. Otherwise, it won't happen. + +And for those early explorers, the facilities will be rustic. Not +at all what you're used to. More 1985 than 2015, even. These +"better AOLs", the modern online services that are our personal +appliances, are pretty plush in the feature department. + +Could they just win? Easily. Quite easily. It's a little painful +herding multiple appliances, but the problem is easily solved by +a few more corporate mergers. We could all just have one big +appliance. If nothing changes, we will. + +To build a new world, we need the right equation and the right +pioneers. Is Urbit the right equation? We think so. Check our +work, please. Are you the right pioneer? History isn't over -- +it's barely gotten started. + +
+ diff --git a/pub/docs/user.mdy b/pub/docs/user.mdy new file mode 100644 index 0000000000..30070a0dfa --- /dev/null +++ b/pub/docs/user.mdy @@ -0,0 +1,24 @@ +--- +logo: black +title: User doc +sort: 1 +--- +
# User documentation

Read the [introduction](user/intro) for a summary of Urbit. The [installation guide](user/install) gets you ready to run. The [launch procedure](user/launch) holds your hand as you create your server image. Once your urbit is live, the [quickstart page](user/start) is all you need if you're in a hurry.

For power users, the [appliance handbook](user/appliance) explains your apps and how to control them. The [filesystem handbook](user/clay) explains the Urbit filesystem and how to sync it with Unix. Finally, the [:dojo manual](user/dojo) and [:talk manual](user/talk) explore the fine points of our shell and messenger respectively.
diff --git a/pub/docs/user/appliance.mdy b/pub/docs/user/appliance.mdy new file mode 100644 index 0000000000..303e073c40 --- /dev/null +++ b/pub/docs/user/appliance.mdy @@ -0,0 +1,274 @@ +--- +title: Appliance handbook +sort: 5 +next: true +--- + +# Appliance handbook + +You've built and launched your urbit. How do you control it +securely? Three ways: through the Unix console, over the Web, +or via an Urbit moon. + +What are you controlling, anyway? A user-level application on +Urbit is called an "appliance." Think of an appliance as like a +Unix process that's also a persistent database. + +By default, your urbit is running two appliances, the `:dojo` +shell and the `:talk` messenger. For more advanced information +about your appliance state, see the end of this document. + +## Console + +The Unix console is the most basic way you talk to apps. You've +already used it a bit, but let's do more. + +The Urbit command line is a little like the Unix command line, +but different. It's also a little like a window manager. + +Your Unix terminal is separated into two parts: the prompt (the +bottom line) and the record (the rest of your screen). + +The record is shared; all the output from all the apps in your +command set appears in it. So you'll see `:talk` messages while +working in the dojo. + +The prompt is multiplexed; you switch the prompting app with +`^X`. Pressing return sends the prompt line to the app. Urbit +does not automatically echo your input line into the record, +the way a normal Unix console does. + +Also unlike a normal Unix console, the application can process +your input before you hit return. In general, invalid input is +rejected with a beep. Incorrect input may even be corrected! +Yes, this is highly advanced console technology. + +Like many but not all Unix command lines, Urbit has built-in +history editing. You've never seen anything like these +innovative key bindings before: + + ^A cursor to beginning of the line (Home) + ^B cursor one character backward (left-arrow) + ^E cursor to the end of the line (End) + ^F cursor one character forward (right-arrow) + ^G beep; cancel reverse-search + ^K kill to end of line + ^L clear the screen + ^N next line in history (down-arrow) + ^P previous line in history (up-arrow) + ^R reverse-search + ^T transpose characters + ^U kill to beginning of line + ^Y yank from kill buffer + +`^C` is processed at the Unix layer, not within Urbit. If +there's an event currently running, ^C interrupts it and prints a +stack trace -- useful for finding infinite loops. + +`^D` from `:talk` or `:dojo` stops your Unix process. From any +other app, it removes the prompting app from the command set. + +Pressing left-arrow or `^B` at the start of a line is an input +operation with the metaphorical meaning "get me out of here," +ie, escape. The exact semantics of an escape are application +dependent. + +## Web + +For now, we'll keep assuming you're at `http://localhost:8080`. +But for planets only, we also proxy web domains through Urbit's +own servers. Any planet `~fintud-macrep` is also at +`fintud-macrep.urbit.org`. Please use this proxy as little as +possible; it's not well-optimized. + +There's a web interface to `:talk` at + + http://localhost:8080/~~/pub/talk/fab + +and a dojo interface at + + http://localhost:8080/~~/pub/dojo/fab + +The `:talk` client is beautiful and works quite well. Use it. +The `:dojo` client is a bit more of a work in progress. (For +instance, you can't paste text into it.) 
The login flow remains rather a work in progress. It's not at all secure. But the first time you use an Urbit app (not just a generated page -- you can tell by the `~~` in the URL), it will prompt you for a password.

In a righteous world, the password would be (or default to) your initial ticket. Since the world is lawless and filled with evil, just hit return and send an empty password. The real password will be printed on your console.

A URL that starts with `/~~/` authenticates your web page as the same urbit as the server. If instead of `/~~/` you write `/~fintud-macrep/` (you're using an app on someone else's urbit, and want to sign in as yourself), you'll authenticate with a single-signon flow. Don't worry, only your own urbit ever sees your password. Once you log in, a cookie is set and you don't need to do it again.

Internally, Urbit treats every web page as another urbit; even a request with no `/~~/` or `/~fintud-macrep/` is an anonymous comet. Appliances actually can't tell whether they're talking to an urbit over `%ames`, or a browser over HTTP.

### Enabling HTTPS

Urbit doesn't yet serve HTTPS directly. But when you route through `urbit.org`, you can also use HTTPS. Our server handles your HTTP request and proxies it over Urbit. Of course we could MITM you, but we won't. If you want HTTPS on your own urbit, use the "secure" port shown on startup:

    http: live ("secure") on 8443

Firewall off this port. Get an nginx or other outer server. Put your SSL certificate in it and reverse-proxy to `8443`.

## An Urbit moon

The fanciest way to control your urbit is through Urbit itself: a moon, or satellite urbit. Sadly, only planets can have moons.

In this setup, you have a planet running on a box in the cloud, and a moon on your laptop or other mobile device. Changes to the planet automatically propagate to the moon. Also, the moon's console is linked to the `:talk` appliance on the planet, so you communicate as yourself.

To build your moon, just run

    ~fintud-macrep:dojo> +moon

This will generate a plot and a ticket. The plot will be a 64-bit plot within your planet, like `~mignel-fotrym-fintud-macrep`. On your laptop, install Urbit:

    urbit -w $plot -t $ticket

You'll get an urbit where `^X` will switch you back and forth between the two prompts:

    ~mignel-fotrym-fintud-macrep:dojo>
    ~fintud-macrep:talk()

Your moon's `%home` desk is already synced to the `%kids` desk on your planet. If you want changes on your moon's `%home` to sync back into your planet's `%home`,

    ~fintud-macrep:dojo> |sync %home ~mignel-fotrym-fintud-macrep

## Urbit internals

As a user of any machine, you can't help understanding the machine on more or less the same terms as its engineers. You should at least know what the major components are.

### Source code

Urbit is a "run-from-repository" OS; all code ships as source on your own urbit, in the revision-control vane `%clay`. Better yet, live code in every layer updates itself when the source changes. If the local source is subscribed to a remote publisher, the update process is "evergreen" and automatic.

There are five major layers of code in Urbit. Layer 1 is the kernel (`arvo/hoon.hoon`), which includes both the Hoon compiler and the Arvo event loop. Layer 2 is the main library (`arvo/zuse.hoon`). Layer 3 is the kernel modules or "vanes" (`arvo/*.hoon`).
Layer 4 is appliances, marks, and fabricators +(`ape/*.hoon`, `mar/*.hoon`, `fab/*.hoon`), run from the `%gall` +and `%ford` vanes. Layer 5 is generators (`gen/*.hoon`), run +from the `:dojo` appliance. + +The kernel (layer 1), the vanes (layer 3) and the apps (layer 4) +are stateful; their state is typed, and of course needs to be +preserved across code updates. If the new state type differs, +the developer must provide a state adaptor. If compilation fails, +the new code is disregarded and retried again on the next change. + +Appliances are like Unix daemons, except persistent and permanent. +But they are the only process-like constructs in Urbit. While +Unix uses many short-lived processes that are not daemons, Urbit +invokes simple tasks in simple contexts. In Unix terms: since +the `ls` process is not meant to run forever and/or have side +effects, giving it the power to do so is asking for trouble. In +Urbit, the layer 5 dojo generator `+ls` is a pure function which +simply produces some typed output. + +You should never have to worry about any of this stuff; we're +just describing it here so it's not a mystery. + +### Appliances + +Your main configuration task is choosing (a) what appliances +on your urbit should be running (the *active set*), and (b) what +appliances your console should be linked to (the *command set*). +Again, ^X switches the console prompt between appliances in (b). + +Why are these different sets? First, some apps don't need a +command prompt. Your urbit runs three default apps: `:talk`, +`:dojo`, and `:hood`. The hood is a system appliance and has no +direct UI (you control it through the dojo). + +Second, you can also link your console to apps on other urbits, +and put those connections in the command set. Essentially, the +console is also `ssh` or `rlogin`. The console is just another +appliance (it's actually the `drum` library within `:hood`), +and Arvo is good at routing events over the network. + +What is an appliance or "app," anyway? An app, basically. Think +of a Unix daemon, except that (1) it only responds to events and +can't run continuously ("nonpreemptive"); and (2) it's +"permanent" (never exits, dies, or is even reset). + +Why do appliances live forever? They have to. An appliance name +is also a sort of port in the Internet sense. A conversation +with appliance X on urbit Y is one conversation, not a sequence +of disconnected activations. + +An appliance is always sourced from a path which is a function of +its name, from `%foo` to `/===/ape/foo/hoon`. In this path, the +urbit is self; the version is now. The desk defaults to `%home`; +changing it will adapt the appliance state. + +The normal way to run code written by some external developer is +to merge (one time) or sync (for continuous upgrades), the +publisher's distribution desk, to a desk on your own urbit. +Obviously a desk per vendor is ideal, which also lets you +"sidegrade" an app to a different vendor by switching desks. + +### Configuration commands + +Initially, your command set is `:talk` and `:dojo`; your active +set is `:talk`, `:dojo` and `:hood`. It's completely fine to +never even think about changing this, but... + +#### `|link $?([app=term ~] [who=plot app=term ~])` + +Link your console to an existing appliance, putting it in the +command set. If no plot is specified, it's an appliance on your +own urbit. + + |link %dojo + |link ~tasfyn-partyv %dojo + +Permission is up to the appliance, of course. 
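To make the remote case concrete, here is roughly what a successful remote link looks like (the plot is the same example as above, and the exact prompt rendering is only a sketch):

    ~fintud-macrep:dojo> |link ~tasfyn-partyv %dojo
    >=

After the `>=` acknowledgment, `^X` cycles the new `~tasfyn-partyv:dojo>` prompt into your console alongside your local `:dojo` and `:talk` prompts.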
+ +#### `|unlink $?([app=term ~] [who=plot app=term ~])` + +The opposite of `|link`. Same interface. + +#### `|start [app=term $|(~ [syd=desk ~])]` + +Start an appliance, adding it to the active set. If `syd` is not +specified, the desk is `%home`: + + |start %foo + |start %foo %away + +If the appliance is running, `|start` sets its desk instead. diff --git a/pub/docs/user/clay.mdy b/pub/docs/user/clay.mdy new file mode 100644 index 0000000000..018f5a37db --- /dev/null +++ b/pub/docs/user/clay.mdy @@ -0,0 +1,418 @@ +--- +title: Filesystem handbook +sort: 6 +next: true +--- + +# Filesystem handbook + +Urbit has its own revision-controlled filesystem, the `%clay` +vane. `%clay` is like a simplified `git`, but more reactive, +and also typed. Okay, this makes no sense. + +The most common way to use `%clay` is to mount a `%clay` node in +a Unix directory. The Urbit process will watch this directory +and automatically record edits as changes, Dropbox style. The +mounted directory is always at the root of your pier directory. + +## Commands + +Note that in both commands and generators, a currently unbound +case (such as a version in the future) will make the calculation +block, not complete. A remote case will cause a network request. +A remote, unbound case will cause a waiting subscription. + +### Mounting to Unix + +#### `|mount [pax=path pot=$|(~ [knot ~])]` + +Mount the path `pax` at the Unix mount point `pot`, the name of a +subdirectory in your pier. + + |mount %/pub/doc %documents + +with a `$PIER` of `/home/nixon/urbit/fintud-macrep`, will mount +`%/pub/doc` in `/home/nixon/urbit/fintud-macrep/documents`. + +The mount point is optional; if it's not supplied, the last knot +in the path (`%doc`) will be used. + +#### `|unmount [mon=$|(term [knot path]) ~] ` + +Undo a mount, either by specifying the path or the mount point: + + |unmount %/pub/doc + |unmount %documents + +It's a good habit to also delete the Unix subtree, but Urbit +doesn't do it for you. + +### Revision-control operations + +#### `|merge [syd=desk src=beak how=$|(~ [germ ~])]` + +Merge the beak `src` into the desk `syd`, with optional merge +strategy `how`. + +The `src` beak can be a desk (`%home`); a plot-desk cell +(`[~doznec %home]`); or a plot-desk-case path (`/=home=`). + + |merge %home-work /=home= %fine + |merge %home-work /=home= + +#### `|sync [syd=desk her=plot org=$|(~ [desk ~])]` + +Activate autosync from the plot `her` and source desk `org`, into +the desk `syd`. If `org` is omitted, it's the same as `syd`: + + |sync %home-local ~doznec %home + |sync %home ~doznec + +Note that `|merge` takes a path because it needs a source case +(revision), which would make no sense for `|sync`. + +#### `|label [syd=desk lab=term]` + +Label the current version of desk `syd`: + +#### `|unsync [syd=desk her=plot org=desk ~]` + +Turn off autosync. The argument needs to match the original +`|sync` perfectly, or Urbit will become angry and confused. + +### Filesystem manipulation + +#### `|rm [paz=(list path)]` + +Remove any leaf at each of the paths in `paz`. + + |rm /===/pub/fab/nixon/hoon + +Remember that folders in `%clay` are a consequence of the tree of +leaves; there is no `rmdir` or `mkdir`. + +#### `|cp [too=path fro=path how=$|(~ [germ ~])]` + +Copy the subtree `fro` into the subtree `too`, committing it with +the specified merge strategy. + +#### `|mv [too=path fro=path how=$|(~ [germ ~])]` + +In `%clay`, `|mv` is just a shorthand for `|cp` then `|rm`. 
The `|rm` doesn't happen unless the `|cp` succeeds, obviously -- it's good to be transactional.

### Filesystem generators

#### `+cal [paz=(list path)]`
#### `+cat [pax=path]`

Produce the noun, if any, at each of these (global) paths. `+cat` produces one result, `+cal` a list.

#### `+ls [pax=path ~]`

Produce the list of names in the folder at `pax`.

Because generators aren't passed the dojo's default path, unlike the current directory in Unix, it's not possible to build a trivial `+ls` that's the equivalent of Unix `ls`. You always have to write `+ls %`.

#### `+ll [pax=path ~]`

Like `+ls`, but the result is a list of full paths. Useful as the Urbit equivalent of the Unix wildcard `*`.

## A quick overview of `%clay`

`%clay` is a typed, global revision-control system. Or in other words, a typed, global referentially transparent namespace. It's difficult to overstate how awesome this is.

(Actually, in Layer 4 and 5 code, you can use the Hoon `.^` rune to literally *dereference* this namespace. And in Layer 5, a generator will even *block* until the resource is available.)

(Another awesome global immutable namespace is IPFS. But IPFS is distributed, whereas `%clay` is just decentralized. IPFS stores resources around the network in a DHT, like Freenet or Bittorrent; `%clay` stores resources on the publisher's server, like HTTP or git.)

### Path format

As a noun, a path in `%clay` is a `(list knot)`, where each segment is an `@ta` atom -- URL-safe text, restricted to `[a-z]`, `[0-9]`, `.`, `-`, `_` and `~`. The list is a tuple terminated with a Hoon null, `~`.

As an ordinary Hoon noun, `[%foo %bar %baz]` has this structure. But Hoon also supports the Unix path syntax: `/foo/bar/baz` is the same noun.

### Relative paths

The Hoon path syntax is always defined relative to a default path, which is configuration state in the Hoon parser. In `:dojo`, this works a little like the Unix current directory.

(But note that in Unix, relative paths are expanded by the application, which can read the current directory from the environment. In Urbit, the current directory and variables are hidden by the dojo from any code it runs. The parser generates the absolute path -- more like the way a Unix shell parser unglobs `*`.)

Relative path syntax: `%` is the default path (Unix `.`). `%%` is the parent path (Unix `..`). Unix does not have `...`, `....`, etc. But Urbit has `%%%`, `%%%%`, etc. Urbit has no local relative paths; in Unix, `foo/bar` is a shorthand for `./foo/bar`, but in Urbit you have to write `%/foo/bar`.

Unix has no top-level substitution syntax, but Urbit does. If the default path is `/foo/bar/baz`, `/=/moo` means `/foo/moo`, and `/=/moo/=/goo` means `/foo/moo/baz/goo`. Also, instead of `/=/=/zoo` or `/=/=/=/voo`, write `/==zoo` or `/===voo`. Your fingers have enough miles on them already.

### Beak

The top three knots in a `%clay` path are `/plot/desk/case`, where `plot` is of course an urbit; `desk` is a branch name; and `case` is a revision identity, which is either (a) a label, (b) a date, or (c) a change number. For obscure reasons, this prefix is called the `beak`.

### Spur

The rest of the path, or `spur`, navigates a tree of `node` nouns. A `node` is like an inode in a Unix filesystem, but different.

An inode is *either* a file or a directory. A `node` is *both* a folder (which may be empty) and an optional leaf (a noun).
There is no `rmdir` or `mkdir`; an empty node is automatically pruned, and creating a node creates its path. The absence of a file-or-directory mode bit eliminates all kinds of strange corner cases, especially in merging.

### Leaf

`%clay` is a typed filesystem, or more precisely a *marked* one. When we sync Unix and Urbit paths, we convert a Unix file extension (an informal specification) into an Urbit *mark* (an executable specification).

The mark name is actually the last knot in the path. Or to put it differently: if any `%clay` node has a leaf, its name within its parent is its mark.

This is ridiculously confusing without examples. Suppose we have the following Unix files, with directories to match:

    doc.md
    doc/intro.md
    doc/start.md

These become the Urbit files

    %/doc/md
    %/doc/intro/md
    %/doc/start/md

The folder map of the `%/doc` node contains three entries: `%md`, `%intro`, `%start`. The folder of `%/doc/intro` and that of `%/doc/start` each contain one entry: `%md` (the mark of an atom in Markdown syntax).

Perhaps this example helps explain *why* `%clay` uses this node design. One, it's a simple index-page model for any kind of published tree. Two, this tree can expand its leaves smoothly, just by adding content: if we decided `%/doc/start` was not a leaf but a tree, we could just add `%/doc/start/child/md`.

And three, the `%clay` node structure syncs invertibly with an equivalent, and not unduly weird, Unix inode layout.

### Mounting to Unix

The most convenient way of interacting with `%clay` is mounting it to Unix, and modifying it with Unix tools. The mount directory is a flat subdirectory of your Urbit pier.

When you have a live mount point, Urbit monitors it with `inotify()` or equivalent. (It would be neat to have a FUSE driver, but we don't.) If you shut your urbit off, it will recheck the mount point when it reloads.

Unix files beginning with `.`, with no extension, with an extension that doesn't map to an Urbit mark, or containing data that doesn't validate to the mark, are ignored. Depending on the extension, there may be a more or less complex conversion from the Unix length/bytestream pair to the Urbit noun.

### More about desks and marks

The Hoon source code for a mark like `%md` is in `/===/mar/md/hoon`. But relative to what beak? What's in the `/===`?

The mark source of a leaf in `%clay` is always relative to its own plot, desk and case. For example, a leaf at

    `/~fintud-macrep/home/31/pub/doc/hello/md`

is controlled by the mark source

    `/~fintud-macrep/home/31/mar/md/hoon`

If there is no such file or it doesn't compile, the mark is effectively treated as `%noun`, ie, an arbitrary value.

(Note that when updating a mark, any update which shrinks the set of nouns in that mark needs to at least adapt old nouns to new. Also, mark source updates should be very slow, but aren't. They should validate all nouns against the new mark, but don't.)

What can you do with a mark? Validate an arbitrary noun; perform diffs, patches, and conflict merges; transform to or from another mark. The `%ford` vane, which builds and converts nouns, can even discover and apply multi-step conversion paths.

Marks are also used to describe network messages. In this case, the mark source beak is the beak of the receiving urbit.

### Desks and merging

As in any git-shaped revision control system, the core operation of the system is merging.
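As a concrete sketch of the working-desk convention described in the next few paragraphs (the desk names are only examples), a round trip looks something like:

    ~fintud-macrep:dojo> |merge %home-work /=home=
    ~fintud-macrep:dojo> |merge %home /=home-work=

The first command creates or updates the scratch desk from `%home`, using the default `%auto` strategy; after you edit on `%home-work`, the second merges the work back.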
+ +One of the effects of same-beak marks is that it doesn't make +sense to create an empty desk. You can't populate an empty desk +properly with typed files. Instead, a new desk should be merged +from an existing desk -- normally the default desk, `%home`. + +It's also generally bad style to edit directly in the desk you +want to modify. Your Unix filesystem changes will appear as a +stream of small, unstructured changes. You should be editing a +working desk. Conventionally, to change `%home`, merge `%home` +into `%home-work`, edit there, and merge back as a "commit." +Ideally, your "commits" include modifications to a text file that +acts as a changelog. + +So merges are important. Again as in `git`, merge strategies are +important. That said, if you are not doing exciting things with +`%clay`, you can skip the strategy subsection. By default, +`%clay` will always use the `%auto` meta-strategy, which will +always work if you're not doing exciting things. + +#### Merge strategies + +There are seven different merge strategies. Throughout our +discussion, we'll say that the merge is from Alice's desk to +Bob's. + +##### Direct strategies + +A `%init` merge should be used iff it's the first commit to a +desk. The head of Alice's desk is used as the number 1 commit to +Bob's desk. Obviously, the ancestry remains intact when +traversing the parentage of the commit, even though previous +commits are not numbered for Bob's desk. + +A `%this` merge means to keep what's in Bob's desk, but join the +ancestry. Thus, the new commit has the head of each desk as +parents, but the data is exactly what's in Bob's desk. For those +following along in git, this is the 'ours' merge strategy, not +the '--ours' option to the 'recursive' merge strategy. In other +words, even if Alice makes a change that does not conflict with +Bob, we throw it away. + +A `%that` merge means to take what's in Alice's desk, but join +the ancestry. This is the reverse of `%this`. + +A `%fine` merge is a "fast-forward" merge. This succeeds iff one +head is in the ancestry of the other. In this case, we use the +descendant as our new head. + +For `%meet`, `%mate`, and `%meld` merges, we first find the most +recent common ancestor to use as our merge base. If we have no +common ancestors, then we fail. If we have multiple most +recent common ancestors, then we have a criss-cross situation, +which should be handled delicately. At present, we don't handle +this kind of situation, but something akin to git's 'recursive' +strategy should be implemented in the future. + +There's a functional inclusion ordering on `%fine`, `%meet`, +`%mate`, and `%meld` such that if an earlier strategy would have +succeeded, then every later strategy will produce the same +result. Put another way, every earlier strategy is the same as +every later strategy except with a restricted domain. + +A `%meet` merge only succeeds if the changes from the merge base +to Alice's head (hereafter, "Alice's changes") are in different +files than Bob's changes. In this case, the parents are both +Alice's and Bob's heads, and the data is the merge base plus +Alice's changed files plus Bob's changed files. + +A `%mate` merge attempts to merge changes to the same file when +both Alice and Bob change it. If the merge is clean, we use it; +otherwise, we fail. A merge between different types of changes -- +for example, deleting a file vs changing it -- is always a +conflict. 
If we succeed, the parents are both Alice's and Bob's +heads, and the data is the merge base plus Alice's changed files +plus Bob's changed files plus the merged files. + +A `%meld` merge will succeed even if there are conflicts. If +there are conflicts in a file, then we use the merge base's +version of that file, and we produce a set of files with +conflicts. The parents are both Alice's and Bob's heads, and the +data is the merge base plus Alice's changed files plus Bob's +changed files plus the successfully merged files plus the merge +base's version of the conflicting files. + +##### Meta-strategies + +There's also a meta-strategy `%auto`, which is the most common. +If no strategy is supplied, then `%auto` is assumed. `%auto` +checks to see if Bob's desk exists, and if it doesn't we use a +`%init` merge. Otherwise, we progressively try `%fine`, +`%meet`, and `%mate` until one succeeds. + +If none succeed, we merge Bob's desk into a scratch desk. Then, +we merge Alice's desk into the scratch desk with the `%meld` +option to force the merge. For each file in the produced set of +conflicting files, we call the `++mash` function for the +appropriate mark, which annotates the conflicts if we know how. + +Finally, we display a message to the user informing them of the +scratch desk's existence, which files have annotated conflicts, +and which files have unannotated conflicts. When the user has +resolved the conflicts, they can merge the scratch desk back into +Bob's desk. This will be a `%fine` merge since Bob's head is in +the ancestry of the scratch desk. + +### Autosync + +Since `%clay` is reactive, it has a subscription interface. +Changes to the filesystem create events which code at Layers 3 or +4 (vanes or apps) can listen to. + +The `:hood` appliance uses subscriptions to implement "autosync". +When one desk is synced to another, any changes to the first desk +are automatically applied to the second -- for any two desks, on +any two urbits. + +Autosync isn't just mirroring. The target desk might have +changes of its own. We use the full merge capabilities of +`%clay` to try to make the merge clean. If there are conflicts, +it'll notify you through `:talk`, and ask you to resolve. + +There can be complex sync flows, many of which are useful. +Often, many urbits will be synced to some upstream desk that is +trusted to provide updates. Sometimes, it's useful to sync two +desks to each other, so that changes to one or the other are +mirrored. Cyclical sync structures are normal and healthy. +Also, one desk can be the target of multiple autosyncs. diff --git a/pub/doc/tools/dojo.md b/pub/docs/user/dojo.mdy similarity index 74% rename from pub/doc/tools/dojo.md rename to pub/docs/user/dojo.mdy index 8cb0c414d6..a7c77a2c45 100644 --- a/pub/doc/tools/dojo.md +++ b/pub/docs/user/dojo.mdy @@ -1,44 +1,48 @@ +--- +title: Dojo manual +sort: 8 +--- -# `:dojo` +# `:dojo` manual -`:dojo` shell basics. +The dojo is a typed functional shell. 
Assuming our default +plot `~fintud-macrep`, Its prompt is: - ~urbit-name:dojo> + ~fintud-macrep:dojo> - -### Quickstart +## Quickstart To print a Hoon expression or other recipe: - ~urbit-name:dojo> (add 2 2) + ~fintud-macrep:dojo> (add 2 2) To save a recipe as a variable `foo`: - ~urbit-name:dojo> =foo (add 2 2) + ~fintud-macrep:dojo> =foo (add 2 2) To save as a unix file (`$pier/.urb/put/foo/bar.baz`): - ~urbit-name:dojo> .foo/bar/baz (add 2 2) + ~fintud-macrep:dojo> .foo/bar/baz (add 2 2) To save as an urbit file (`/===/foo/bar/baz`): - ~urbit-name:dojo> *foo/bar/baz (add 2 2) + ~fintud-macrep:dojo> *foo/bar/baz (add 2 2) A noun generator with ordered and named arguments: - ~urbit-name:dojo> +make one two three, =foo (add 2 2), =bar 42 + ~fintud-macrep:dojo> +make one two three, =foo (add 2 2), =bar 42 A poke message to an urbit daemon: - ~urbit-name:dojo> :~urbit-name/talk (add 2 2) + ~fintud-macrep:dojo> :~fintud-macrep/talk (add 2 2) A system command to `:hood`: - ~urbit-name:dojo> |reload %vane + ~fintud-macrep:dojo> |reload %vane -### Manual +## Manual An Urbit value is called a "noun." A noun is either an unsigned integer ("atom") or an ordered pair of nouns ("cell"). Nouns @@ -48,12 +52,20 @@ The dojo is your safe space for hand-to-hand combat with nouns. Every dojo command builds a "product" noun functionally, then applies this product in a side effect -- show, save, or send. -#### Theory +### Theory -The dojo is not just a Hoon interpreter. Hoon is a purely -functional language; dojo recipes are *conceptually* functional, -but they often use concrete actions or interactions. A simple -Hoon expression is only one kind of recipe. +In the quickstart we learned a crude interpretation of the dojo +in terms of "expressions, generators and operations." While +nothing in the quickstart section is inaccurate, it's not the way +the system works internally. + +*All* dojo lines are commands. An operation uses a *recipe* +to create a noun, which the command uses in its side effect. +Just printing the noun is a trivial case of a command. + +Recipes are *conceptually* functional, but often use concrete, +stateful action sequences. A simple Hoon expression (*twig*) is +purely functional, but it's only one kind of recipe. A recipe can get data from an HTTP GET request or an interactive input dialog. It can also query, even block on, the Urbit @@ -69,7 +81,8 @@ network. And each session's state is independent. (If you want to work on two things at a time, connect two console sessions to your dojo.) -Once you've built your product noun, you show, save, or send it. +Once you've built the product of your recipe, you show, save, +or send it. You can pretty-print the product to the console. You can save it -- as a dojo variable, as a revision to the Urbit filesystem, or @@ -82,21 +95,21 @@ language, but the dojo is a dynamic interpreter. The nouns you build in the dojo are dynamically typed nouns, or "cages". A cage actually has two layers of type: "mark," a network label -(like a MIME type), and "range," a Hoon language type. When a +(like a MIME type), and "span," a Hoon language type. When a cage is sent across the Urbit network, the receiving daemon validates the noun against its own version of the mark, and -regenerates the range. +regenerates the span. Of course, sometimes a recipe produces a noun with mark `%noun`, -meaning "any noun," and range `*`, the set of all nouns. We have +meaning "any noun," and span `*`, the set of all nouns. 
We have no choice but to do the best we can with mystery nouns, but we prefer a formal description. -Marks let us perform a variety of formal typed operations on -nouns: validation of untrusted data, format conversion, even -patch and diff for revision control. +A mark is also called a "format." Marks let us perform a variety +of formal typed operations on nouns: validation, translation, +even patch and diff for revision control. -#### Other resources +### Other resources An excellent way to understand `:dojo` is to read the source, which is in `/===/ape/dojo/hoon`. @@ -105,25 +118,25 @@ Unfortunately, you may or may not know Hoon. We'll use some Hoon snippets here for defining structures and grammars. Just think of it as pseudocode -- the meaning should be clear from context. -#### Syntax and semantics +### Syntax and semantics To use the dojo, type a complete command at the dojo prompt. The simplest command just prints a Hoon expression: - ~urbit-name:dojo> (add 2 2) + ~fintud-macrep:dojo> (add 2 2) Hit return. You'll see: > (add 2 2) 4 - ~urbit-name:dojo> + ~fintud-macrep:dojo> Similarly in tall form, - ~urbit-name:dojo> %+ add 2 2 + ~fintud-macrep:dojo> %+ add 2 2 > %+ add 2 2 4 - ~urbit-name:dojo> + ~fintud-macrep:dojo> An incomplete command goes into a multiline input buffer. Use the up-arrow (see the console history section) to get the last @@ -131,20 +144,20 @@ command back, edit it so it's just `%+ add 2`, and press return. You'll see: > %+ add 2 - ~urbit-name/dojo< + ~fintud-macrep/dojo< Enter `2`. You'll see: > %+ add 2 2 4 - ~urbit-name/dojo> + ~fintud-macrep/dojo> The full command that parses and runs is the concatenation of all the partial lines, with a space inserted between them. To clear all multiline input, just hit return on an empty prompt. -##### Command structure +### Command structure Every finished line is parsed into one `++dojo-command`: @@ -159,31 +172,27 @@ Every finished line is parsed into one `++dojo-command`: == :: Each kind of `++dojo-command` is an action that depends on one -noun thproduction, a `++dojo-recipe`. We describe first the +noun production, a `++dojo-recipe`. We describe first the commands, then the recipes. ---- - -###### `[%show p=dojo-recipe]` +##### `[%show p=dojo-recipe]` To print the product, the command is just the recipe: - ~urbit-name:dojo> (add 2 2) + ~fintud-macrep:dojo> (add 2 2) ---- - -###### `[%verb p=term q=dojo-recipe]` +##### `[%verb p=term q=dojo-recipe]` To save the product to a variable `foo`: - ~urbit-name:dojo> =foo (add 2 2) + ~fintud-macrep:dojo> =foo (add 2 2) `foo` goes into your Hoon subject (scope) and is available to all expressions. To unbind `foo`: - ~urbit-name:dojo> =foo + ~fintud-macrep:dojo> =foo The dojo has a set of special variables, some read-write and some read-only: `dir`, `lib`, `arc`, `now`, `our`. @@ -196,10 +205,8 @@ and normally accessed/set with `%`. `lib` is a set of libraries, and Read-only specials are `now`, the current (128-bit `@da`) time, and `our`, the current urbit. ---- - -###### `[%edit p=path q=dojo-recipe]` -###### `[%save p=path q=dojo-recipe]` +##### `[%edit p=path q=dojo-recipe]` +##### `[%save p=path q=dojo-recipe]` The product is either a new version of, or a modification to, the Urbit file at the given path. (See the discussion of Urbit @@ -207,17 +214,17 @@ filesystem paths.) 
To save: - ~urbit-name:dojo> *%/numbers/four (add 2 2) + ~fintud-macrep:dojo> *%/numbers/four (add 2 2) To edit: - ~urbit-name:dojo> -%/numbers/four (add 2 2) + ~fintud-macrep:dojo> -%/numbers/four (add 2 2) A save (`*`) overwrites the current (if any) version of the file with a new version of any mark. The save command above will work (if you want `/numbers/four` at your current path). -An edit (`-`) produces a diff whose mark has to match the diff +An edit (`-`) applies a diff whose mark has to match the diff mark for the current version of the file. The edit command above will not work, because evaluating a Hoon expression like `(add 2 2)` just produces a `%noun` mark, ie, an arbitrary noun. @@ -227,81 +234,74 @@ be the same version specified in the write -- in other words, we can only write to HEAD. If someone else has sneaked in a change since the version specified, the command will fail. ---- +##### `[%unix p=path q=dojo-recipe]` -###### `[%unix p=path q=dojo-recipe]` - - ~urbit-name:dojo> ./numbers/four (add 2 2) + ~fintud-macrep:dojo> ./numbers/four (add 2 2) The product is saved as a Unix file (its mark is translated to MIME, and the MIME type is mapped as the extension). ---- +##### `[%poke p=goal q=dojo-recipe]` -###### `[%poke p=goal q=dojo-recipe]` +A poke or *order* is a one-way transactional request. It either +succeeds and returns no information, or fails and produces an +error dump. -A poke is a one-way transactional request. It either succeeds -and returns no information, or fails and produces an error dump. +Every order is sent to one daemon on one urbit. The default +urbit is your urbit. The default daemon is the system daemon, +`:hood`. The following syntactic forms are equivalent: -Every poke is sent to one daemon on one urbit. The default urbit -is your urbit. The default daemon is the system daemon, `:hood`. -The following syntactic forms are equivalent: - - ~urbit-name:dojo> :~urbit-name/hood (add 2 2) - ~urbit-name:dojo> :hood (add 2 2) - ~urbit-name:dojo> :~urbit-name (add 2 2) - ~urbit-name:dojo> : (add 2 2) + ~fintud-macrep:dojo> :~fintud-macrep/hood (add 2 2) + ~fintud-macrep:dojo> :hood (add 2 2) + ~fintud-macrep:dojo> :~fintud-macrep (add 2 2) + ~fintud-macrep:dojo> : (add 2 2) Urbit pokes do not have a separate verb. The mark of the message defines the semantics of the operation. You don't call a method `foo` whose argument is a noun in mark `bar` -- you poke a noun in mark `bar`. The mark is the protocol is the method. -If the poke succeeds, you'll see an `>=` line. If not, you'll +If the order succeeds, you'll see an `>=` line. If not, you'll see an error report, typically with a stack trace. It's common (but not necessary) to use a custom generator for the daemon you're talking to. (For generators, see below.) Hence - ~urbit-name:dojo> :~urbit-name/fish +fish/catch (add 2 2) + ~fintud-macrep:dojo> :~fintud-macrep/fish +fish/catch (add 2 2) It's irritating to type "fish" twice, just because we're using a fish generator to talk to a fish daemon. Hence a shortcut: - ~urbit-name:dojo> :~urbit-name/fish|catch (add 2 2) + ~fintud-macrep:dojo> :~fintud-macrep/fish|catch (add 2 2) If we combine all these defaults, we get the "system command" shortcut: - ~urbit-name:dojo> :~urbit-name/hood +hood/reload %ames - ~urbit-name:dojo> |reload %ames + ~fintud-macrep:dojo> :~fintud-macrep/hood +hood/reload %ames + ~fintud-macrep:dojo> |reload %ames This is the most common poke, a generated message to your own hood. ---- - ##### `[%http p=? 
q=purl r=dojo-recipe]` The Web has its own poke, unfortunately in two flavors. To POST, - ~urbit-name:dojo> +http://website.com (add 2 2) + ~fintud-macrep:dojo> +http://website.com (add 2 2) To PUT: - ~urbit-name:dojo> -http://website.com (add 2 2) + ~fintud-macrep:dojo> -http://website.com (add 2 2) As with a poke, you'll get a >= for success, or an error report. ---- - -##### Recipes, models and filters +#### Recipes, models and filters But wait, what's a recipe? Simplifying the actual code slightly: ++ dojo-recipe :: functional build $% [%ex p=twig] :: hoon expression - [%as p=mark q=dojo-recipe] :: conversion + [%as p=mark q=dojo-recipe] :: format conversion [%do p=twig q=dojo-recipe] :: apply gate [%ge p=dojo-script] :: generator [%ur p=purl] :: get url @@ -316,9 +316,7 @@ But wait, what's a recipe? Simplifying the actual code slightly: q=(map term (unit dojo-recipe)) :: by keyword == :: ---- - -###### `[%ex p=twig]` +##### `[%ex p=twig]` The twig in an `%ex` recipe is a Hoon expression. The recipe syntax is just the Hoon syntax. @@ -331,11 +329,11 @@ A twig produces the trivial mark `%noun`, except in two cases where the dojo can do better. The dojo analyzes the twig to detect two trivial cases where direct evaluation gives us a mark: a variable reference like `foo` that matches a dojo variable, or -an urbitspace dereference like `.^(/cx/~urbit-name/main/1/foo)`. +an urbitspace dereference like `.^(/cx/~fintud-macrep/main/1/foo)`. +In either case, if we executed these through Hoon, we'd get the +same noun with the same span. ---- - -###### `[%tu p=(list dojo-recipe)]` +##### `[%tu p=(list dojo-recipe)]` A is just a tuple of recipes, using the normal Hoon syntax for a tuple. `[a]` is `a`, `[a b]` the cell `[a b]`, `[a b c]` the @@ -343,9 +341,7 @@ cell `[a [b c]]`. A tuple, unless it's a trivial 1-tuple, is always marked `%noun`. ---- - -###### `[%ge p=dojo-script]` +##### `[%ge p=dojo-script]` A `%ge` is a generator, a configurable script loaded from the filesystem. @@ -358,7 +354,7 @@ arguments are recipes. The path specifies a Hoon source file in For the path `/fun/make`, the ordered arguments `1`, `2` and `3`, and the named arguments `foo` and `bar`, the syntax is: - ~urbit-name:dojo> +fun/make 1 2 3, =foo (add 2 2), =bar 42 + ~fintud-macrep:dojo> +fun/make 1 2 3, =foo (add 2 2), =bar 42 Unless this non-closed form is the end of a command, it needs to be surrounded by `[]` to make it play well with others. @@ -371,37 +367,29 @@ A dialog generator will take over the prompt and ask you questions. If this seems terrifying, ^D will abort the dialog, the recipe, and the command, and take you back to the dojo. ---- - -###### `[%as p=mark q=dojo-recipe]` +##### `[%as p=mark q=dojo-recipe]` `%as` is a mark conversion. Since the input to it is another recipe, we can chain them to make a conversion pipeline. To convert a recipe, just precede it with the converison form, `&mark`: - ~urbit-name:dojo> &noun (add 2 2) - ~urbit-name:dojo> &md (add 50 7) - ---- + ~fintud-macrep:dojo> &noun (add 2 2) + ~fintud-macrep:dojo> &md (add 50 7) -###### `[%do p=twig q=dojo-recipe]` +##### `[%do p=twig q=dojo-recipe]` `%do` is a Hoon functino (gate) application. It can also be in a pipeline. 
Its syntax is a hoon expression preceeded by `_`: - ~urbit-name:dojo> _lore 'hello\0aworld' - ~urbit-name:dojo> _|=(a=@ (mul 3 a))} (add 2 2) + ~fintud-macrep:dojo> _lore 'hello\0aworld' + ~fintud-macrep:dojo> _|=(a=@ (mul 3 a))} (add 2 2) ---- - -###### `[%ur p=purl]` +##### `[%ur p=purl]` A simple HTTP get produces the result as a `%httr` noun. ---- - ### Development Developing dojo generators is the easiest form of Hoon programming. @@ -436,7 +424,7 @@ if the user specifies `=foo 42`, your `opt` is replaced with Bear in mind that dojo syntax is list-centric, so your `arg` will always end with a `~`. For instance, - ~urbit-name/dojo> +fun/make 1 2 3 + ~fintud-macrep/dojo> +fun/make 1 2 3 will generate an `arg` of `[1 2 3 ~]`. Yes, this is the only place in Urbit where we do list-centric arguments. @@ -444,29 +432,23 @@ place in Urbit where we do list-centric arguments. Note also that script configuration is typed. The user's command will fail if there's a type mismatch. But `arg` does not have to be a homogeneous list -- just a tuple with `~` on the end. Also, -you can use `arg=*` and sort out the nouns by hand. - -You can also use `*` anywhere if you're not interested in the -system context, or in +you can use `arg=*` and sort out the nouns by hand. Any value +you don't care about can simply be `*`. #### Generators -There are three kinds of generators: builders (with no special -I/O), dialogs (which read console input), and scrapers (which -pull data from the webs). Any generator can use `.^` to both -read from and block (wait for remote or delayed results) on -the Urbit global namespace. +There are four kinds of generators: builders (with no special +I/O), dialogs (which read console input), scrapers (which pull +data from the webs), and synthesizers (which produce another +generator). Any generator can use `.^` to both read from and +block (wait for remote or delayed results) on the Urbit global +namespace. A generator produces a cell whose tail is the configuration gate, -and whose head is either `%say` for a builder, `%ask` for a -dialog, or `%get` for a scraper. +and whose head is `%say` for a builder, `%ask` for a dialog, +`%get` for a scraper, and `%con` for a constructor. -(If you want to write one generator which both prompts the user -and scrapes the web, don't. Write two, and configure the second -with the result of the first. We pay a price for keeping things -stupid.) - -##### Builders +#### Builders A builder just produces a cask (mark-value cell) directly from the configuration gate. Here's the simplest builder, with a @@ -476,7 +458,7 @@ blank configuration: :- %noun "hello, world." -##### Dialogs +#### Dialogs A dialog is a console input form. We recommend using the helpful `sole` structures, with @@ -530,7 +512,7 @@ above. This takes a parsing `++rule` (here `dim:ag`, which parses a decimal), and a gate whose sample is the parsed value, producing a new dialog. -##### Scrapers +#### Scrapers Most stuff on the internets is crap, but there's exceptions. Sometimes it's nice to get it and compute functions on it. @@ -548,3 +530,8 @@ A scraper is much like a dialog, except instead of `sole-lo` and `++sole-at` takes a `purl` request url, and a gate through which to slam the result `httr`. + +#### Synthesizer + +A synthesizer simply produces another recipe. 
Its + diff --git a/pub/docs/user/install.mdy b/pub/docs/user/install.mdy new file mode 100644 index 0000000000..67fcbd4fd0 --- /dev/null +++ b/pub/docs/user/install.mdy @@ -0,0 +1,94 @@ +--- +title: Install +sort: 2 +next: true +--- + +# Installation guide + +Urbit can be installed on most Unix systems. There is no Windows +port. Windows is a wonderful OS, we just haven't gotten to it yet. +Use a VM. + +## Install as a package + +### OS X - Homebrew + + brew install --HEAD homebrew/head-only/urbit + +### Ubuntu or Debian + +Third-party packages are available, at: + + https://github.com/yebyen/urbit-deb + +Urbit is only supported on Jessie onward (but outbound HTTPS +requests only work on Stretch; I wish we knew why; help us!) + +## Hand-build from source + +First, install all external dependencies. Then, make. + +### Dependencies + +urbit depends on: + + gcc (or clang) + gmp + libsigsegv + openssl + automake + autoconf + ragel + cmake + re2c + libtool + libssl-dev (Linux only) + ncurses (Linux only) + +#### Ubuntu or Debian + + sudo apt-get install libgmp3-dev libsigsegv-dev openssl libssl-dev libncurses5-dev git make exuberant-ctags automake autoconf libtool g++ ragel cmake re2c + +#### Fedora + + sudo dnf install gcc gcc-c++ git gmp-devel openssl-devel openssl ncurses-devel libsigsegv-devel ctags automake autoconf libtool ragel cmake re2c + +#### AWS + + sudo yum --enablerepo epel install gcc gcc-c++ git gmp-devel openssl-devel ncurses-devel libsigsegv-devel ctags automake autoconf libtool cmake re2c + +#### OS X - Homebrew + + brew install git gmp libsigsegv openssl libtool autoconf automake cmake + +#### OS X - Macports + + sudo port install git gmp libsigsegv openssl autoconf automake cmake + +Although `automake`/`autoconf`/`libtool` are generally installed by +default, some have reported needing to uninstall and reinstall those +three packages, at least with Homebrew. Your mileage may vary. + +#### FreeBSD + + pkg install git gmake gmp libsigsegv openssl automake autoconf ragel cmake re2c libtool + +### Download and make + +Clone the repo: + + git clone git://github.com/urbit/urbit.git + +`cd` to the directory you just created: + + cd urbit + +Run `make`: + + make + +(On FreeBSD, use `gmake` instead.) + +The executable is `bin/urbit`. Install it somewhere, or just use +it where it is. diff --git a/pub/docs/user/intro.mdy b/pub/docs/user/intro.mdy new file mode 100644 index 0000000000..7bad0a793c --- /dev/null +++ b/pub/docs/user/intro.mdy @@ -0,0 +1,136 @@ +--- +title: Introduction +sort: 1 +next: true +--- + +# Introduction + +Urbit is a clean-slate system software stack defined as a +deterministic computer. An encrypted P2P network, `%ames`, runs +on a functional operating system, Arvo, written in a strict, +typed functional language, Hoon, which compiles itself to a +combinator interpreter, Nock, whose spec gzips to 340 bytes. + +What is Urbit for? Most directly, Urbit is designed as a +personal cloud server for self-hosted web apps. It also uses +HTTP APIs to manage data stuck in traditional web applications. + +More broadly, Urbit's network tackles identity and security +problems which the Internet can't easily address. Programming +for a deterministic single-level store is also a different +experience from Unix programming, regardless of language. + +## Architectural overview + +A deterministic computer? Urbit's state is a pure function of +its event history. In practice it uses a memory checkpoint and +an append-only log. 
Every event is a transaction; Urbit is an ACID database and a single-level store. Urbit runs on Unix now, but it's easy to imagine on a hypervisor or even bare metal.

A purely functional OS? Urbit is pure -- no code inside it can make system calls or otherwise affect the underlying platform. Instead, the top-level event function defines an I/O protocol. It maps an input event and the current state to a list of output actions and the subsequent state. In Hoon:

    $+([event state] [(list action) state])

### Nock

Nock is a sort of nano-Lisp without syntax, symbols or lambdas. Most Lisps are one-layer: they create a practical language by extending a theoretically simple interpreter. The abstraction is simple and the implementation is practical; there is no actual codebase that is both simple and practical. Hoon and Nock are two layers: Hoon compiles itself to pure Nock. Since Urbit is defined in Nock, not Hoon, we can upgrade Hoon over the air.

The Nock data model is especially trivial. A *noun* is an atom or a cell. An atom is any unsigned integer. A cell is an ordered pair of nouns. Nouns are acyclic and expose no pointer equality test.

### Hoon

Hoon is a strict combinator language that avoids mathematical theory and notation. It aims at a mechanical, imperative feel. Hoon uses ASCII digraphs instead of keywords; there are no user-level macros. The type system infers only forward and does not use unification, but is not much weaker than Haskell's. The compiler and inference engine is about 2000 lines of Hoon.

### Arvo

Arvo is an event-driven OS written in Hoon. It can upgrade itself and everything inside it over the network. The Arvo kernel proper is 500 lines of Hoon, which implements a typed event system with explicit call-stack structure. It ships with modules that provide P2P networking (`%ames`), a revision-control system (`%clay`), a web client/server (`%eyre`), a functional build system (`%ford`), and an application engine (`%gall`).

### `%ames`

`%ames`, the Urbit network, is an encrypted P2P protocol over UDP. Its address space is semi-decentralized; 64-bit addresses are hierarchically distributed, 128-bit addresses are self-created. Addresses (or *plots*) are rendered in a phonemic syntax for memorability. The scarcity of short plots helps control spam and other Sybil attacks. The short plot hierarchy is also reused as a supernode routing system for NAT traversal.

### Apps

Urbit ships with two default applications: a REPL or shell `:dojo`, and a distributed user-level message-bus `:talk`. `:talk` under the hood resembles NNTP; to the user, it looks like a self-hosted Slack or persistent IRC.

The full Urbit stack (compiler, standard library, kernel, modules, and applications) is about 25,000 lines of Hoon. Urbit is patent-free and MIT licensed.

## Status

Anyone can run the Urbit VM, of course. But the `%ames` network is officially invitation-only. Not that we're antisocial -- just that we're under construction.

Right now, Urbit's only practical use is to (a) build Urbit and (b) talk about Urbit. Its performance is lamentable. Its documentation is inadequate. Its keys are test keys. Its planets explode on a regular basis. We reserve the right to reboot ("flag-day") the whole network.

However, Urbit is at least out of research mode and focused more or less exclusively on optimization and bug-fixing. So at least, whatever you learn will stay true. And bleeding edges are fun.
+ +## Getting involved + +If you're interested in following Urbit, you can: + +- Read our documentation at + [urbit.org](http://urbit.org/docs) +- Subscribe to our newsletter at [urbit.org](http://urbit.org). +- Check out the + [urbit-dev](https://groups.google.com/forum/#!forum/urbit-dev) + mailing list. +- Follow [@urbit_](https://twitter.com/urbit\_) on Twitter. +- Hit us up by email, urbit@urbit.org. + We're nice! + +## Code of conduct + +Everyone involved in the Urbit project needs to understand and +respect our code of conduct, which is: "don't be rude." + +## Pronunciation and etymology + +Urbit is always pronounced "herb it," never "your bit." Not that +it's not your bit! But "herb it" just sounds better. + +The origin of the name is just the Latin *urbi*, meaning city. diff --git a/pub/docs/user/launch.mdy b/pub/docs/user/launch.mdy new file mode 100644 index 0000000000..ed6fdaad98 --- /dev/null +++ b/pub/docs/user/launch.mdy @@ -0,0 +1,111 @@ +--- +title: Launch +sort: 3 +next: true +--- + +# Launch procedure + +An urbit is a persistent server on the `%ames` P2P network. +You'll create one of these servers now. To understand what +you're building, you need to know a little about the network. + +## Launch instructions + +If you have an invitation, it's a planet like `~fintud-macrep` +and a ticket like `~fortyv-tombyt-tabsen-sonres`. Run + + urbit -w fintud-macrep -t fortyv-tombyt-tabsen-sonres + +(You can leave the `~` on, but it annoys some Unix shells.) + +If you don't have an invitation, pick a nickname for your comet, +like `mycomet`. Urbit will randomly generate a 128-bit plot: + + urbit -c mycomet + +Either way, creating your urbit will take some time. Some of +this time involves creating keys; some of it involves downloading +code over Urbit itself. Go get a cup of coffee, and/or absorb +the miscellaneous true facts below: + +## Network architecture + +An Urbit address is a 128-bit number, or *plot*. Every server on +the Urbit network (or just an "urbit") has one unique plot. + +Since Urbit is designed as a personal server, a plot is both a +network address and a digital identity. There is no additional +human-meaningful name layer like the DNS. Plots will never be +meaningful; but to make them as memorable as possible, we type +them in a phonemic syntax with one byte per syllable, like +`~harlyx-rocsev` for `0x510b.9441`. + +Urbit address space is cryptographic property, like Bitcoin, but +Urbit doesn't use a blockchain. Plots are digital land, not +digital currency; you own your urbit cryptographically, but you +don't get it by mining. In land, transfers are infrequent and +not frictionless. (Right now, all keys are test keys, and there +are no transfers, only initial invitations.) + +Urbit is semi-decentralized: it overlays a 64-bit hierarchical +structure on the low end of a 128-bit fingerprint namespace. If +you have an invitation, it's a *ticket* that lets you create a +32-bit plot, aka *planet*. If you don't have an invitation, you +have to create a 128-bit plot, aka *comet*. + +As a comet, you're not necessarily a bad person. But you could +be anyone, so you have zero reputation. You have no official +access to any Urbit services. Any connectivity you may enjoy +could be shut off at any time, and it probably will be. If the +Internet has proven one thing, it's that positive default +reputation and effectively infinite identity don't mix. + +## Substrate interactions + +Urbit doesn't run on bare chips and wires, at least not at +present. 
It runs as a Unix process and sends UDP packets. + +Since Urbit is a P2P network that runs over random UDP ports, +some firewalls may not like it. Urbit without connectivity still +works as an interpreter, but it can't launch without the network. + +If run as `root`, the `urbit` process can only read and write +inside the `fintud-macrep` or `mycomet` directory, which we call +your *pier*. A pier is portable; any Urbit install on any OS can +execute the same pier. (But don't *ever* run the same pier or +plot on two computers at once.) + +In the pier directory is a set of user-level mount points. Mount +points are synced Dropbox style, with Unix file changes +autocommitted to the Urbit revision control system (`%clay`) and +vice versa. (Urbit does not have its own editor -- you edit +Urbit code either with an Unix editor on a mounted file, or with +a Web editor from your browser.) + +Also within the pier is a system directory, `.urb/`, which +contains an event log (`egz.hope`), a checkpoint (`.chk`), and +I/O directories for uploads and downloads (`put` and `get`). +You can compact the pier by deleting the checkpoint, although +that means Urbit needs to re-execute its entire event history. +This will take some time. Go have a beer. + +Also in `.urb` is a file like `code.~rosrev-dinnul`. This is +your passcode; all data in the pier [XX: not yet the checkpoint] +is encrypted with it. For extra security, print out or memorize +the contents of this file, then delete it; Urbit will prompt for +the passcode on startup. Please be warned that Urbit is not at +present secure in any way! + +## Complete launch procedure + +Wait until you see a prompt, either + + ~fintud-macrep:talk() +or + + ~fintud-macrep:dojo> + +and then press ^D to quit. + +Your urbit is launched! Ladies and gentlemen, we are floating in space. diff --git a/pub/docs/user/start.mdy b/pub/docs/user/start.mdy new file mode 100644 index 0000000000..2b9edf73c3 --- /dev/null +++ b/pub/docs/user/start.mdy @@ -0,0 +1,224 @@ +--- +title: Quickstart +sort: 4 +next: true +--- + +# Quickstart + +To start your already-launched urbit, just run `urbit` with one +argument, which is the pier directory (`$PIER`). This is your +planet name if you have a planet, or the name you used with `-c` +if you have a comet: + + urbit fintud-macrep +or + + urbit mycomet + +Piers are portable. You can move a pier anywhere. But never, +*ever* run the same urbit in two places at once. (If you try to +start an urbit but already have a process is running on the same +pier, Urbit will kill the old process.) + +(Also, don't let the Unix filesystem it's on run out of disk. +This is a known way to corrupt your urbit! Sorry.) + +## Basic operation + +Out of the box, your urbit is running two default appliances, +`:dojo` (a shell or REPL) and `:talk`. Switch between them with +`^X`. Note that all apps share an output log, but `^X` switches +the prompt. + +`^D` from any default appliance exits the urbit process. + +## Compute: your `:dojo` appliance + +If your plot is `~fintud-macrep`, the dojo prompt is + + ~fintud-macrep:dojo> + +Type any Hoon expression at the command line and see the result: + + ~fintud-macrep:dojo> (add 2 2) + +You'll see: + + > (add 2 2) + 4 + ~fintud-macrep:dojo> + +### Dojo expressions, generators and operators + +`:dojo` is of course a command-line REPL or shell. But it +remains functional in spirit. To simplify a little, there are +three kinds of `:dojo` lines: expressions, generators, commands. 
+ +Dojo *expressions* are like `(add 2 2)` -- they simply compute +and print a value, without any side effects, from a twig of Hoon. + +Expressions are a simple case of `generators`, which are +functional *in principle* -- rather like HTTP GET requests. In +fact, GET requests are one data source for generators. Others +include the Urbit namespace, prompting the user, etc. A +generator command line always starts with `+`, as in `+ls`. + +Commands are all other lines. Command lines normally start with +`|`, as in `|mount %`, or `:`, as in `:hood +hood/mount %`. The +latter is just an abbreviation for the former. + +A command is generally an *order* to some local or remote +appliance. An order is a transactional message: like a +request-response pair, but a success response is empty, whereas a +failure contains an error report. + +Orders have no output unless they fail. If they fail, they print +an error trace. If they succeed, they just print a demure `>=`, +confirming that the receiving appliance did as it was told. + +For instance, in Unix, the `rm` and `ls` commands both run a +process. In Urbit, `|rm` is a command; it changes your +filesystem. `+ls` is a generator; it produces a value. + +## Converse: your `:talk` appliance + +To use Urbit as a social network, switch to `:talk`: + + ~fintud-macrep:talk() + +Join the global station `urbit-meta`: + + ~fintud-macrep:talk() ;join ~doznec/urbit-meta + +You're on the air! You should see some backlog to give you +context. Please remember our code of conduct: don't be rude. +Also, `urbit-meta` is politically correct and safe for work. + +## Using the filesystem + +The Urbit filesystem, `%clay`, is a revision-control system (like +`git`) that syncs to a Unix directory (like Dropbox). While you +can of course create `%clay` changes from within Urbit, Unix has +mature editors and file handling tools. + +So usually, the best way to work with `%clay` files is to make +edits in a Unix mirror directory, and let the Urbit interpreter +commit them as changes. A simple way to set this up is to mount +the default `%home` desk: + + ~fintud-macrep:dojo> |mount % + +This mirrors your `%home` desk against `$PIER/home`, and tells +the `urbit` process to monitor the latter with `inotify()` etc. +The mount is two-way: Unix edits propagate up to Urbit, Urbit +changes fall down into Unix. + +In fact, the source for this page is here: + + $PIER/home/pub/docs/user/start.mdy + +If you're reading this on your own ship, edit the file with the +browser still open. Isn't that cool? Now, change it back -- you +don't particularly want a conflict next time we make an update, +since your `%home` desk is generally synced to our repo. + +Reactive auto-updates are a particular speciality of `%clay`. We +use them to drive auto-updates of code at every layer. A normal +Urbit user never has to think about software updates. + +## Read the docs + +Your urbit is your personal web server. The best place to read +its docs is by pointing your browser at it. You can also post +your own documents, of course. + +Urbit prints the HTTP port it's serving when it starts up: + + http: live (insecure) on 8080 + +8080 is the default. If you're running on AWS or another cloud +service, this port may be firewalled; go to the firewall +configuration to open it. + +(*Always run any even semi-serious urbit HTTP server behind a +reliable, battle-proven frontline server like nginx.*) + +All planets, stars and galaxies are exposed to the web at +`planet.urbit.org`.
(This should work via a direct DNS binding, +but at present uses a central proxy, so use it gently.) + +As a last resort, Urbit's own official planet `~magwyd-lorsug` is +also bound to just plain `urbit.org`, and hosts the public docs +here. Always trust content from `~magwyd-lorsug`! + +But assuming it's `localhost:8080`, the Urbit docs are at + + http://localhost:8080/home/docs + +## Publish your own files + +Urbit is a simple platform for publishing your own content. +Again, this is normally done by populating a Unix directory +which mirrors an Urbit node. + +From Unix, just `mkdir -p $PIER/home/pub/my`. Populate this tree +with content files whose extension is any Urbit mark. Start +with `.md` for markdown, or just `.html`. + +This is just like populating an Apache `public_html` directory. +The request + + http://localhost:8080/home/pub/my/foo/bar/baz + +will render the Unix file + + $PIER/home/pub/my/foo/bar/baz.md + +and, for HTML deliveries, inject a self-monitoring script that +long-polls until the Urbit file changes. If this change is +triggered by a Unix edit, it forms a live path from the +developer's `vim` buffer to the user's screen. (This circuit is +highly cacheable, so more practical than it may sound, but of +course you don't have to use it if you don't want to.) + +Also, you can use the `/tree` fabricator to add a standard +navigation layer to your document hierarchy. Your HTML can even +decorate itself with generic navigation macros for easier +navigation and browsing. Just replace `home/pub` in your URLs +with `home/tree/pub`: + + http://localhost:8080/home/tree/pub/my/foo/bar/baz + +(The `/tree` system is behind the page you're reading. The +documentation prefix in your URL bar above, `home/docs`, is just +an alias for `home/tree/pub/docs`. You can compare these pages +to `home/pub/docs` to see the work `/tree` is doing.) + +## Functional publishing + +Finally, anywhere your mirror directory can contain a static data +file, it can contain a Hoon program that generates the same value +functionally. Just replace the last part of the path with a +directory, containing a `.hook` file under the extension. + +(Yes, this path geometry is a little funky. We're probably going +to change it soon.) + +For instance, instead of `my/foo/bar/baz.md`, we have +`my/foo/bar/baz/md.hook`. This is not a markdown file; it's a +Hoon source file, containing a function that produces a markdown +file. + +The function's argument is the rest of the path; requesting +`my/foo/bar/baz/moo/too` just passes `[%moo %too ~]` to +`foo/bar/baz/md.hook`. Note that when you use a query string on +your URL, it gets encoded into a path segment, so the query is in +the path as well. + +This "functional publishing" model is obviously how both the +`tree` virtual hierarchy and the `docs` alias work. There are a +lot of other things you can do with it. But you can see a simple +example in `$PIER/try/hello/hymn.hook`, accessible at + + http://localhost:8080/home/try/hello diff --git a/pub/doc/tools/talk.md b/pub/docs/user/talk.mdy similarity index 70% rename from pub/doc/tools/talk.md rename to pub/docs/user/talk.mdy index 52f630c4b6..16759a0165 100644 --- a/pub/doc/tools/talk.md +++ b/pub/docs/user/talk.mdy @@ -1,6 +1,10 @@ -# `:talk` +--- +title: Talk manual +sort: 7 +next: true +--- -`:talk` messaging interface. +# `:talk` manual `:talk` is the Urbit appliance for chatter and notifications. For less sophisticated users, Urbit *is* just `:talk`.
If you @@ -26,10 +30,10 @@ arbitrary content in posts, from URLs to images to long-form text. (Only URLs right now.) However, any message on `:talk` has to be able to summarize itself in a 64-byte text line. -There are four kinds of station: a write-only "mailbox" for -direct messages, an invite-only "party" for private conversation, -a read-only "journal" for curated content, and a public-access -"board" for general use or abuse. +There are four kinds of station: a write-only `%mailbox` for +direct messages, an invite-only `%party` for private conversation, +a read-only `%journal` for curated content, and a public-access +`%board` for general use or abuse. While there's obviously no central `:talk` server for all of Urbit, and thus no such thing as a truly global station space, @@ -37,75 +41,79 @@ active Urbit stars cooperate to federate, manage and mirror a collectively-managed namespace, very like Usenet. These "federal" stations are generally public-access boards. -### Quickstart +Right now, the only public federal station is `urbit-meta`. +Because the party always starts in the kitchen. + +## Quickstart Let's post something! At the default `:talk` prompt -``` -~tasfyn-partyv:talk: -``` + + ~fintud-macrep:talk() + type the message: -``` -~tasfyn-partyv:talk: hello, world. -``` + + ~fintud-macrep:talk() hello, world. + And hit return. Don't worry, no one but you will see this. The `:` means you're posting to yourself. You'll get the post: -``` -~tasfyn-partyv: hello, world. -~tasfyn-partyv:talk: -``` + + ~fintud-macrep: hello, world. + ~fintud-macrep:talk() + It's boring to post to yourself. Let's join a station: -``` -~tasfyn-partyv: ;join /urbit-test -``` -(`/urbit-test` is a federal station, meaning it's hosted by your -star (for `~tasfyn-partyv`, `~doznec`). The `/` notation is just -an abbreviation for `~doznec/urbit-test`.) + + ~fintud-macrep: ;join /urbit-meta + +(`/urbit-meta` is a federal station, meaning it's hosted by your +star (for `~fintud-macrep`, `~doznec`). The `/` notation is just +an abbreviation for `~doznec/urbit-meta`.) You'll see: -``` ----------:talk| %porch subscribed to /urbit-test, called `>` ----------:talk| rules of /urbit-test: ----------:talk| test posts only. no shitposting. no pedos/nazis. - ~doznec> ~tasfyn-partyv admitted to %urbit-test -~tasfyn-partyv:talk> -``` + + ---------:talk| %porch subscribed to /urbit-meta, called `>` + ---------:talk| rules of /urbit-meta: + ---------:talk| don't be rude + ---------:talk| urbit-meta is politically correct and safe for work + ~doznec= ~fintud-macrep admitted to %urbit-meta + ~fintud-macrep:talk= + Notice the character assignment - stations you're subscribed to are assigned [consistent ASCII glyphs](#-station-glyphs), which you'll see in the log when you hear from these stations, and on the prompt when you're talking to them. -Post a line to `/urbit-test`: -``` -~tasfyn-partyv:talk> hello, world -``` +Post a line to `/urbit-meta`: + + ~fintud-macrep:talk= hello, world + You'll see, echoed back at you through `~doznec`: -``` -~tasfyn-partyv:talk> hello, world -``` -And of course, anyone else in `/urbit-test` will see it as well. -But you don't care about `/urbit-test`, so leave it: -``` -~tasfyn-partyv:talk> ;leave -``` + + ~fintud-macrep:talk= hello, world + +And of course, anyone else in `/urbit-meta` will see it as well. 
+But you don't care about `/urbit-meta`, so leave it: + + ~fintud-macrep:talk= ;leave + You'll see: -``` ----------:talk| %porch has left /urbit-test, called `>` -``` + + ---------:talk| %porch has left /urbit-meta, called `>` + Everyone else will see: -``` - ~doznec> ~tasfyn-partyv has left %urbit-test -``` + + ~doznec= ~fintud-macrep has left %urbit-meta + Now you're ready to use `:talk` for real! List the federal groups currently available with -``` -~tasfyn-partyv:talk> ;list -``` + + ~fintud-macrep:talk= ;list + For general discussion about Urbit, we recommend `/urbit-meta`. -### Basic usage +## Manual -#### Input conventions +### Input conventions There are three kinds of inputs you can type at the `:talk` prompt: lines, URLs, and commands. @@ -120,7 +128,7 @@ will be posted in multiple lines. A URL is any valid URL. A command is any line starting with `;`. -#### Source annotation +### Source annotation Any post in your flow is shown with its author, together with a glyph that shows how the post reached you. A post can reach you @@ -136,32 +144,30 @@ glyph. Posts to a station use that station's glyph. You can see a list of glyph bindings with `;what`. Write `;what >` to see what station `>` is bound to, or -`;what /urbit-test` to see if `/urbit-test` has a binding. +`;what /urbit-meta` to see if `/urbit-meta` has a binding. -#### Audience selection +### Audience selection Audience selection is important in a multiplexed communicator! The audience is always shown in your prompt. If there's a glyph for it, it's shown as the glyph: -``` -~tasfyn-partyv:talk> -``` + + ~fintud-macrep:talk= + Otherwise, the audience is shown in parens: -``` -~tasfyn-partyv:talk(~wictuc-folrex) -``` + + ~fintud-macrep:talk(~dannum-mitryl) `:talk` works fairly hard to get the audience right and minimize manual switching. But to manually set the audience, the command -is simply `;station` - eg, `;~wictuc-folrex` for a direct post; -`/urbit-test` or `~doznec/urbit-test` to post to a federal +is simply `;station` - eg, `;~dannum-mitryl` for a direct post; +`/urbit-meta` or `~doznec/urbit-meta` to post to a federal station, `%mystation` to post to a station on your own ship. For a station bound to a glyph, `;` then the glyph; eg, `;>`. You can post a line and set the audience in one command, eg: -``` -;~wictuc-folrex this is a private message -``` + + ;~dannum-mitryl this is a private message You can configure your audience in a number of ways, which are applied in priority order. From strongest to weakest: @@ -177,7 +183,7 @@ the start of the line and pressing backspace (whether the line is empty or not). Posting a line clears the typing and activation configurations. -#### Post activation and numbering +### Post activation and numbering Every post can summarize itself in 64 bytes. But some posts contain more information, which is not displayed by default. @@ -186,14 +192,14 @@ post, it's marked by an underscore `_`, instead of a space, between source and content. The conventional example is a URL. When you post a URL: -``` -~tasfyn-partyv:talk> http://foobar.com/moo/baz -``` + + ~fintud-macrep:talk= http://foobar.com/moo/baz + This will appear in the flow as: -``` -~tasfyn-partyv>_foobar.com -``` -meaning that `~tasfyn-partyv` posted a link to `foobar.com`, + + ~fintud-macrep>_foobar.com + +meaning that `~fintud-macrep` posted a link to `foobar.com`, on the station or conversation whose glyph is `>`. The effect of activating a post depends on the post. 
For a link, @@ -203,14 +209,14 @@ post, activating shows the full audience, for complex audiences. Posts in your `:talk` flow are numbered; the numbers are printed every five posts, as -``` -----------[5955] -``` + + ----------[5955] + You can specify a post to activate in two ways: by absolute or relative position. Absolute position is a direct history number: -``` -;5955 -``` + + ;5955 + If you use fewer digits than are in the current flow number, the high digits are defaulted "deli style" - if the current number is 5955, typing `;3` means `;5953`, and `;140` means `;5140`. To @@ -220,24 +226,24 @@ A unary sequence of `;` characters looks backward from the present. `;` activates the most recent post; `;;` the second most recent; etc. -#### Nicknames +### Nicknames Sometimes you know your Urbit friends by other names, on or offline. Use the `;nick` command to assign or look up nicknames. `;nick` with no arguments lists all nicknames; `;nick -~tasfyn-partyv` looks up a nickname; `;nick curtis` searches in -reverse; `;nick ~tasfyn-partyv curtis` creates a nickname. +~fintud-macrep` looks up a nickname; `;nick plato` searches in +reverse; `;nick ~fintud-macrep plato` creates a nickname. All nicknames must be 14 characters or less, lowercase. Of course, nicknames are strictly local - like the names on entries in a phonebook. Sometimes in a post you want to mention -someone you know by a nickname. Just type `~curtis`, and `:talk` -will replace it magically with `~tasfyn-partyv` (or beep if no -`~curtis` is bound). +someone you know by a nickname. Just type `~plato`, and `:talk` +will replace it magically with `~fintud-macrep` (or beep if no +`~plato` is bound). -#### Presence +### Presence You'll see presence notifications when people enter or leave stations you're subscribed to. @@ -245,43 +251,41 @@ stations you're subscribed to. `;who` lists everyone in all your stations. `;who station` lists everyone in that station. -#### Typing indicator +### Typing indicator If one or more urbits in your audience is typing, `:talk`'s presence system will detect it and change the prompt: -``` -~tasfyn-partyv [~wictuc-folrex...]> -``` -#### Creating and managing stations + ~fintud-macrep [~dannum-mitryl...]= + +### Creating and managing stations To create your own mailbox, party, journal or board: -``` -;create party %myfunparty -;create journal %serious-journal -;create board %bizarre-board -``` + + ;create party %myfunparty + ;create journal %serious-journal + ;create board %bizarre-board + etc. 
Every form of station has an exception list; to block -`~wictuc-folrex` from your default mailbox `%porch`, -``` -;block %porch ~wictuc-folrex -``` -To invite people to `%myfunparty`: -``` -;invite %myfunparty ~wictuc-folrex, ~sondel-forsut -``` -To ban from `%bizarre-board`: -``` -;banish %bizarre-board ~wictuc-folrex -``` -To appoint a coauthor of `%serious-journal`: -``` -;author %serious-journal ~sondel-forsut -``` +`~dannum-mitryl` from your default mailbox `%porch`, -#### Station Glyphs + ;block %porch ~dannum-mitryl + +To invite people to `%myfunparty`: + + ;invite %myfunparty ~dannum-mitryl, ~lagret-marpub + +To ban from `%bizarre-board`: + + ;banish %bizarre-board ~dannum-mitryl + +To appoint a coauthor of `%serious-journal`: + + ;author %serious-journal ~lagret-marpub + +#### Station glyphs Station are assigned out of the list `:|}>`, then randomly out of it and the sets `-+*.`, ``,=`'^\/``, diff --git a/pub/sole/fab/hymn.hook b/pub/dojo/fab/hymn.hook similarity index 100% rename from pub/sole/fab/hymn.hook rename to pub/dojo/fab/hymn.hook diff --git a/pub/sole/src/main.coffee b/pub/dojo/src/main.coffee similarity index 100% rename from pub/sole/src/main.coffee rename to pub/dojo/src/main.coffee diff --git a/pub/sole/src/share.coffee b/pub/dojo/src/share.coffee similarity index 100% rename from pub/sole/src/share.coffee rename to pub/dojo/src/share.coffee diff --git a/pub/paste/elem.hook b/pub/paste/elem.hook index 9aef1622e7..d4b9a18d02 100644 --- a/pub/paste/elem.hook +++ b/pub/paste/elem.hook @@ -1,11 +1,20 @@ /= all /; flop /^ (list (pair time ,*)) /: /%%/ /& /mime/ ;div + ;link(rel "stylesheet", href "/home/lib/base.css"); + ;link(rel "stylesheet", href "/home/pub/paste/main.css"); ;script@"//code.jquery.com/jquery-2.1.4.min.js"; ;script@"/~/at/home/lib/urb.js"; ;script:''' + document.title = 'pastebin - urbit' urb.appl = 'write' urb.send.mark = 'write-paste' submit = function(){ + if($("select :selected").attr('value')===undefined) { + $("select").addClass('err') + return false + } + $("select").removeClass('err') + $("textarea,button").attr('disabled', true) urb.send({ txt:$("textarea").val(), typ:$("select :selected").val() @@ -17,14 +26,16 @@ } ''' :: + ;h1: New ;p:textarea; - ;button(onclick "submit()"):"Submit" ;select + ;option(): Type ;option(value "md"): Markdown ;option(value "txt"): Text ;option(value "hoon"): Hoon == + ;button(onclick "submit()"):"Submit" ;hr; - ; recent: + ;h1: Recent ;* (turn all |=([a=time *] ;p:a/"paste/{}":"{}")) == diff --git a/pub/paste/main.css b/pub/paste/main.css new file mode 100644 index 0000000000..3b276692c8 --- /dev/null +++ b/pub/paste/main.css @@ -0,0 +1,42 @@ +body { + font-family: 'bau'; + margin: 4rem; + width: 48rem; +} + +hr { + width: 8rem; + height: .2rem; + background-color: #ccc; + margin: 3rem 0; + border: 0; +} + +button, +textarea { + outline: none; +} + +button { + margin-left: 1rem; +} + +textarea { + width: 100%; + min-height: 12rem; + border: 0; + background-color: #eaeaea; +} + +select { + border: 3px solid transparent; +} + +.err { + border: 3px solid red; +} + +a { + font-family: 'scp'; + color: inherit; +} \ No newline at end of file diff --git a/pub/tree/src/css/leads.styl b/pub/tree/src/css/leads.styl index a74d61b95c..335f9ab551 100644 --- a/pub/tree/src/css/leads.styl +++ b/pub/tree/src/css/leads.styl @@ -1,10 +1,11 @@ -#cont.lead +.lead #body margin-top 3rem margin-top 0 .bar margin-top 2rem + margin-bottom 2rem & > div display inline-block a.logo @@ -14,17 +15,28 @@ border none img.logo margin-right 18px 
- ul.list.nav + margin-top 0 + ul margin 0 + line-height 2rem + display inline-block + li::before + content '' + padding-right none li display inline-block margin-bottom 0 margin-right 1rem + vertical-align middle li a border-bottom none + text-decoration underline + li a h1 + margin 0 + line-height inherit + text-transform capitalize + font-size 1.2rem h1 - text-decoration none - border-bottom 2px solid #000 text-transform capitalize font-size 1rem font-weight 400 @@ -37,27 +49,31 @@ &.fold margin-top 6rem - .list > li > a h1 - margin 0 - line-height inherit - text-transform capitalize - font-size 1.2rem + .list li h1 + line-height 2rem & font-size 1.6rem line-height 2.6rem a - line-height 1.6rem line-height 1rem + + p + font-size 1.6rem + line-height 3rem + + .mono + font-size 1.3rem .footer margin 4rem 0 4rem 0 - font-weight 200 - line-height 1rem - color #ccc p margin 0 + font-size .7rem + code + line-height 1rem + font-size .7rem input.email border-radius 0 @@ -92,47 +108,43 @@ :-ms-input-placeholder color #e6e7e8 - button - &.submit - font inherit - border 0 - background-color #fff - border-bottom 3px solid #000 - font-size 1.6rem - line-height 1.6rem - display inline-block - text-align left - margin-top 1rem - height 2rem - padding 0 .6rem - - li::before - content '' - padding-right none + button.submit + font inherit + border 0 + background-color #fff + border-bottom 3px solid #000 + font-size 1.6rem + line-height 1.6rem + display inline-block + text-align left + margin-top 1rem + height 2rem + padding 0 .6rem .date font-weight 400 - - .kids p.ib - margin-top 0 - .kids h3 - font-size 1rem - - .kids h3::after - content "\2014" - margin-left 1em - - .kids h2, - div.footer - font-family 'scp' - font-size .7rem +@media only screen and (max-width: 1170px) + #cont.lead + top 0rem @media only screen and (min-width: 320px) and (max-width: 1024px) #cont.lead top 0rem font-size 1rem line-height 1.6rem + + .post h1 + font-size 2rem + line-height 3rem + + .bar + margin-top 1rem + margin-bottom 1rem + + .list li h1, + font-size 1rem + line-height 1.6rem img.logo display block @@ -141,6 +153,7 @@ h1.leader, h1.fold font-size 1.6rem + line-height 3rem margin-left 0 .list li, @@ -148,6 +161,13 @@ .list li h1, font-size 1rem line-height 1.6rem + + input.email + font-size 1.2rem + width 100% + + button.submit + font-size 1rem .date, div.footer diff --git a/pub/tree/src/css/main.css b/pub/tree/src/css/main.css index 05b71ed06b..d21413b900 100644 --- a/pub/tree/src/css/main.css +++ b/pub/tree/src/css/main.css @@ -79,6 +79,7 @@ pre, li:before, .spin, #bred a, +.mono, h3.time { font-family: "scp", "Courier New", courier, monospace; } @@ -89,11 +90,23 @@ html { line-height: 1.6rem; -webkit-text-size-adjust: none; } +p { + font-size: 1.2rem; + line-height: 2rem; +} a { color: #000; + border-bottom: 1px solid #000; + text-decoration: none; display: inline-block; line-height: 0.8rem; } +a:visited { + opacity: 0.5; +} +a code { + line-height: inherit; +} hr { display: inline-block; width: 6rem; @@ -104,7 +117,8 @@ h1 { margin-top: 4rem; line-height: 2.6rem; } -#body > div > h1:first-of-type { +#body > div > h1:first-of-type, +#body div.short > h1:first-of-type { margin-top: 1rem; } h2, @@ -135,12 +149,13 @@ h3 code { } pre, code { - font-size: 0.8rem; + font-size: 1rem; } pre { background-color: #f5f5f5; padding: 0.3rem; margin-left: -0.3rem; + white-space: pre-line; } code { line-height: 1.2rem; @@ -154,10 +169,11 @@ ul { padding: 0; } li:before { - content: "+"; + font-family: 'scp'; + content: "\2022"; 
padding-right: 0.3rem; - font-size: 0.8rem; - font-weight: 600; + font-size: 1rem; + font-weight: 500; } #nav, #cont { @@ -189,7 +205,7 @@ li:before { transition: opacity 0.3s ease-in-out; } #cont { - position: absolute; + position: relative; top: 0; margin-bottom: 9rem; } @@ -235,6 +251,12 @@ li:before { transition: opacity 1s ease-in-out; z-index: 4; } +img.logo.black { + content: url("https://storage.googleapis.com/urbit-extra/logo/logo-black-100x100.png"); +} +img.logo.white { + content: url("https://storage.googleapis.com/urbit-extra/logo/logo-white-100x100.png"); +} img.logo { height: 2rem; width: 2rem; @@ -254,7 +276,7 @@ h3.time { overflow: hidden; } #nav #sibs > div { - margin-bottom: 4px; + margin-bottom: 0.6rem; } .focus #sibs { margin-top: 0 !important; @@ -262,35 +284,39 @@ h3.time { } #nav a, .list > li > a { + display: inline; +} +#nav a { text-transform: uppercase; font-size: 0.7rem; line-height: 1rem; font-weight: 200; letter-spacing: 1px; } -.list > li > a { - margin-bottom: 0.3rem; +.link-next { + margin-top: 2rem; + font-weight: 500; +} +.list > li { + margin-bottom: 1rem; } .list > li > a h1 { font-size: inherit; line-height: inherit; } -#nav a, -.list > li > a h1 { - margin-right: 0.3rem; -} #nav .active a { font-weight: 500; text-decoration: none; } #up { padding-right: 1rem; - margin-top: -0.3rem; + margin-top: -8px; } #sides { float: right; } #sides a { + display: inline-block; margin-right: 0.6rem; } .arow-up, @@ -393,31 +419,6 @@ h2.sub { margin-top: 0; text-transform: uppercase; } -div.post h1 { - font-size: 2.8rem; - line-height: 4rem; - display: block; - margin-top: 1rem; - margin-bottom: 1rem; -} -div.post h2 { - line-height: 1rem; - letter-spacing: 1px; -} -div.post h2 { - margin-top: 4rem; -} -div.post h2 { - font-size: 1.2rem; - font-weight: 500; -} -div.post p { - font-size: 1.2rem; - line-height: 2.2rem; -} -div.post li p { - display: inline; -} div.toc { margin-top: 3rem; margin-bottom: 3rem; @@ -460,6 +461,13 @@ div.toc h1.t { padding-right: 1rem; margin-left: -1rem; } +.footer p { + font-family: 'scp'; + font-size: 0.7rem; + font-weight: 400; + margin-top: 3rem; + color: #ccc; +} .error { color: #f91733; } @@ -484,124 +492,148 @@ div.toc h1.t { .warning.w { width: auto; } -h1.lead { +div.post h1 { + font-size: 2.8rem; + line-height: 4rem; margin-top: 1rem; - margin-bottom: 6rem; - font-size: 1.6rem; - line-height: 2rem; - margin-bottom: 3rem; -} -h2 { - margin-top: 3rem; -} -h2.date { - margin-top: 0; - line-height: 0.7rem; -} -.list.posts .post { - margin-bottom: 2rem; -} -.list.posts .post h1 { - text-transform: none; - font-size: 1.6rem; - line-height: 1.8rem; margin-bottom: 1rem; - display: block; } -.list.posts .post h2 { - font-size: 0.7rem; - font-weight: 400; - line-height: 1rem; - margin-top: 0; -} -.list.posts li.post:before { - content: ""; -} -.date { - font-weight: 400; +div.post h2 { font-family: 'scp'; font-size: 0.7rem; + line-height: 1rem; + letter-spacing: 1px; +} +div.post h3 { + margin-top: 0; + font-size: 0.7rem; + font-weight: 200; + letter-spacing: 0.1rem; + text-transform: uppercase; +} +div.post h3::before { + content: '\2014'; + margin-right: 0.3rem; +} +div.post .list li { + margin-bottom: 0; + font-size: 1.2rem; +} +div.post p { + font-size: 1.2rem; + line-height: 2.2rem; +} +div.post li p { + display: inline; +} +div.post p.ib { + margin-top: 0; } @media only screen and (min-width: 320px) and (max-width: 1024px) { - #cont { - top: 2rem; + div.post h2, + div.post h3 { + font-size: 0.5rem; } - h1.lead { - font-size: 1.6rem; 
+ div.post p { + font-size: 1rem; line-height: 2rem; - margin-bottom: 3rem; + } + div.post li h1 { + font-weight: 400; + } + div.post li::before { + font-weight: 200; } } -#cont.lead #body { +.lead #body { margin-top: 3rem; margin-top: 0; } -#cont.lead .bar { +.lead .bar { margin-top: 2rem; + margin-bottom: 2rem; } -#cont.lead .bar > div { +.lead .bar > div { display: inline-block; } -#cont.lead .bar a.logo { +.lead .bar a.logo { display: inline-block; height: 2rem; vertical-align: middle; border: none; } -#cont.lead .bar img.logo { +.lead .bar img.logo { margin-right: 18px; + margin-top: 0; } -#cont.lead .bar ul.list.nav { +.lead .bar ul { margin: 0; + line-height: 2rem; + display: inline-block; } -#cont.lead .bar ul.list.nav li { +.lead .bar ul li::before { + content: ''; + padding-right: none; +} +.lead .bar ul li { display: inline-block; margin-bottom: 0; margin-right: 1rem; -} -#cont.lead .bar ul.list.nav li a { - border-bottom: none; -} -#cont.lead .bar ul.list.nav h1 { - text-decoration: none; - border-bottom: 2px solid #000; - text-transform: capitalize; - font-size: 1rem; - font-weight: 400; - letter-spacing: 0.03rem; -} -#cont.lead h1 { - margin-top: 1rem; vertical-align: middle; - line-height: 4rem; } -#cont.lead h1.fold { - margin-top: 6rem; +.lead .bar ul li a { + border-bottom: none; + text-decoration: underline; } -#cont.lead .list > li > a h1 { +.lead .bar ul li a h1 { margin: 0; line-height: inherit; text-transform: capitalize; font-size: 1.2rem; } -#cont.lead { +.lead .bar ul h1 { + text-transform: capitalize; + font-size: 1rem; + font-weight: 400; + letter-spacing: 0.03rem; +} +.lead h1 { + margin-top: 1rem; + vertical-align: middle; + line-height: 4rem; +} +.lead h1.fold { + margin-top: 6rem; +} +.lead .list li h1 { + line-height: 2rem; +} +.lead { font-size: 1.6rem; line-height: 2.6rem; } -#cont.lead a { - line-height: 1.6rem; +.lead a { line-height: 1rem; } -#cont.lead .footer { +.lead p { + font-size: 1.6rem; + line-height: 3rem; +} +.lead .mono { + font-size: 1.3rem; +} +.lead .footer { margin: 4rem 0 4rem 0; - font-weight: 200; - line-height: 1rem; - color: #ccc; } -#cont.lead .footer p { +.lead .footer p { margin: 0; + font-size: 0.7rem; } -#cont.lead input.email { +.lead .footer code { + line-height: 1rem; + font-size: 0.7rem; +} +.lead input.email { border-radius: 0; font: inherit; font-family: 'scp'; @@ -615,28 +647,28 @@ h2.date { margin-right: 1rem; display: inline-block; } -#cont.lead input.email.valid { +.lead input.email.valid { color: #99f27c; } -#cont.lead input.email.error { +.lead input.email.error { color: #f74040; } -#cont.lead .email:empty:not(:focus):before { +.lead .email:empty:not(:focus):before { content: attr(data-ph); } -#cont.lead ::-webkit-input-placeholder { +.lead ::-webkit-input-placeholder { color: #e6e7e8; } -#cont.lead :-moz-placeholder { +.lead :-moz-placeholder { color: #e6e7e8; } -#cont.lead ::-moz-placeholder { +.lead ::-moz-placeholder { color: #e6e7e8; } -#cont.lead :-ms-input-placeholder { +.lead :-ms-input-placeholder { color: #e6e7e8; } -#cont.lead button.submit { +.lead button.submit { font: inherit; border: 0; background-color: #fff; @@ -649,27 +681,13 @@ h2.date { height: 2rem; padding: 0 0.6rem; } -#cont.lead li::before { - content: ''; - padding-right: none; -} -#cont.lead .date { +.lead .date { font-weight: 400; } -#cont.lead .kids p.ib { - margin-top: 0; -} -#cont.lead .kids h3 { - font-size: 1rem; -} -#cont.lead .kids h3::after { - content: "\2014"; - margin-left: 1em; -} -#cont.lead .kids h2, -#cont.lead div.footer { 
- font-family: 'scp'; - font-size: 0.7rem; +@media only screen and (max-width: 1170px) { + #cont.lead { + top: 0rem; + } } @media only screen and (min-width: 320px) and (max-width: 1024px) { #cont.lead { @@ -677,6 +695,18 @@ h2.date { font-size: 1rem; line-height: 1.6rem; } + #cont.lead .post h1 { + font-size: 2rem; + line-height: 3rem; + } + #cont.lead .bar { + margin-top: 1rem; + margin-bottom: 1rem; + } + #cont.lead .bar .list li h1 { + font-size: 1rem; + line-height: 1.6rem; + } #cont.lead img.logo { display: block; margin-bottom: 1rem; @@ -684,6 +714,7 @@ h2.date { #cont.lead h1.leader, #cont.lead h1.fold { font-size: 1.6rem; + line-height: 3rem; margin-left: 0; } #cont.lead .list li, @@ -692,6 +723,13 @@ h2.date { font-size: 1rem; line-height: 1.6rem; } + #cont.lead input.email { + font-size: 1.2rem; + width: 100%; + } + #cont.lead button.submit { + font-size: 1rem; + } #cont.lead .date, #cont.lead div.footer { font-size: 0.6rem; @@ -732,8 +770,17 @@ h2.date { #nav a { white-space: nowrap; } + #nav #sibs { + width: 12rem; + } #nav #sibs > div { height: 20px; + margin-bottom: 4px; + } + #nav #sibs > div.active a, + #nav #sibs > div a { + border-bottom: none; + text-decoration: underline; } #nav.m-down, #nav.m-up { @@ -744,7 +791,7 @@ h2.date { top: 0; } #nav > div > div { - max-height: 1rem; + max-height: 2rem; overflow: hidden; transition: max-height 0.3s ease-in-out; } @@ -755,12 +802,19 @@ h2.date { #cont { top: 3rem; } + #cont.no-anchor { + top: 0; + } } @media only screen and (min-width: 320px) and (max-width: 1024px) { body, html { font-size: 21px; } + p { + font-size: 1rem; + line-height: 2rem; + } #nav, #cont { width: 94%; @@ -772,17 +826,17 @@ h2.date { padding-top: 0; opacity: 1; left: 0; - background-color: #fff; z-index: 2; } #nav > div > div { - max-height: 1.4rem; + max-height: 1.2rem; } #nav > div { padding-top: 0.6rem; } #nav #sibs { width: 11rem; + padding-top: 0; } #nav #sibs > div { height: 20px; @@ -793,7 +847,8 @@ h2.date { font-size: 0.7rem; } #nav #sides { - float: right; + float: none; + margin-left: 1rem; } #nav .arow-up, #nav .arow-next, @@ -812,7 +867,6 @@ h2.date { border-right: 0.6rem solid #000; } #cont { - top: 3rem; left: 0; padding-bottom: 9rem; } diff --git a/pub/tree/src/css/main.styl b/pub/tree/src/css/main.styl index 1aacfca88f..d0d06ce260 100644 --- a/pub/tree/src/css/main.styl +++ b/pub/tree/src/css/main.styl @@ -9,6 +9,7 @@ pre li:before .spin #bred a +.mono h3.time font-family "scp","Courier New",courier,monospace @@ -19,11 +20,23 @@ html line-height 1.6rem -webkit-text-size-adjust none +p + font-size 1.2rem + line-height 2rem + a color #000 + border-bottom 1px solid #000 + text-decoration none display inline-block line-height .8rem +a:visited + opacity .5 + +a code + line-height inherit + hr display inline-block width 6rem @@ -35,6 +48,7 @@ h1 line-height 2.6rem #body > div > h1:first-of-type +#body div.short > h1:first-of-type margin-top 1rem h2 @@ -65,12 +79,13 @@ h3 code pre code - font-size .8rem + font-size 1rem pre background-color #f5f5f5 padding .3rem margin-left -.3rem + white-space pre-line code line-height 1.2rem @@ -84,10 +99,11 @@ ul padding 0 li:before - content "+" + font-family 'scp' + content "\2022" padding-right .3rem - font-size .8rem - font-weight 600 + font-size 1rem + font-weight 500 #nav #cont @@ -119,7 +135,7 @@ li:before transition opacity .3s ease-in-out #cont - position absolute + position relative top 0 margin-bottom 9rem @@ -181,7 +197,7 @@ h3.time overflow hidden #nav #sibs > div - margin-bottom 4px + 
margin-bottom .6rem .focus #sibs margin-top 0 !important @@ -189,35 +205,39 @@ h3.time #nav a .list > li > a + display inline + +#nav a text-transform uppercase font-size .7rem line-height 1rem font-weight 200 letter-spacing 1px -.list > li > a - margin-bottom .3rem +.link-next + margin-top 2rem + font-weight 500 + +.list > li + margin-bottom 1rem .list > li > a h1 font-size inherit line-height inherit -#nav a -.list > li > a h1 - margin-right .3rem - #nav .active a font-weight 500 text-decoration none #up padding-right 1rem - margin-top -.3rem + margin-top -8px #sides float right #sides a + display inline-block margin-right .6rem .arow-up @@ -319,32 +339,6 @@ h2.sub margin-top 0 text-transform uppercase -div.post - h1 - font-size 2.8rem - line-height 4rem - display block - margin-top 1rem - margin-bottom 1rem - - h2 - line-height 1rem - letter-spacing 1px - - h2 - margin-top 4rem - - h2 - font-size 1.2rem - font-weight 500 - - p - font-size 1.2rem - line-height 2.2rem - - li p - display inline - div.toc margin-top 3rem margin-bottom 3rem @@ -387,6 +381,13 @@ div.toc padding-right 1rem margin-left -1rem +.footer p + font-family 'scp' + font-size .7rem + font-weight 400 + margin-top 3rem + color #ccc + .error color rgba(249,23,51,1) diff --git a/pub/tree/src/css/mobile.styl b/pub/tree/src/css/mobile.styl index 398bf918fb..48057ef28b 100644 --- a/pub/tree/src/css/mobile.styl +++ b/pub/tree/src/css/mobile.styl @@ -20,8 +20,17 @@ #nav a white-space nowrap + #nav #sibs + width 12rem + #nav #sibs > div height 20px + margin-bottom 4px + + #nav #sibs > div.active a + #nav #sibs > div a + border-bottom none + text-decoration underline #nav.m-down #nav.m-up @@ -32,7 +41,7 @@ top 0 #nav > div > div - max-height 1rem + max-height 2rem overflow hidden transition max-height .3s ease-in-out @@ -42,13 +51,19 @@ #cont top 3rem + #cont.no-anchor + top 0 /* tablets + phones ----------- */ @media only screen and (min-width: 320px) and (max-width: 1024px) body html font-size 21px - + + p + font-size 1rem + line-height 2rem + #nav #cont width 94% @@ -60,17 +75,17 @@ padding-top 0 opacity 1 left 0 - background-color #fff z-index 2 #nav > div > div - max-height 1.4rem + max-height 1.2rem #nav > div padding-top .6rem #nav #sibs width 11rem + padding-top 0 #nav #sibs > div height 20px @@ -81,7 +96,8 @@ font-size .7rem #nav #sides - float right + float none + margin-left 1rem #nav .arow-up #nav .arow-next @@ -98,10 +114,9 @@ border-right .6rem solid #000 #cont - top 3rem left 0 padding-bottom 9rem - + #cont h1:first-child margin-top 0 diff --git a/pub/tree/src/css/posts.styl b/pub/tree/src/css/posts.styl index 10aafe3671..d88c938691 100644 --- a/pub/tree/src/css/posts.styl +++ b/pub/tree/src/css/posts.styl @@ -1,59 +1,50 @@ -h1.lead - margin-top 1rem - margin-bottom 6rem - font-size 1.6rem - line-height 2rem - margin-bottom 3rem - -h2 - margin-top 3rem - &.date - margin-top 0 - line-height .7rem - -.list.posts - .post - margin-bottom 2rem - - .post h1 - text-transform none - font-size 1.6rem - line-height 1.8rem +div.post + h1 + font-size 2.8rem + line-height 4rem + margin-top 1rem margin-bottom 1rem - display block - .post h2 + h2 + font-family 'scp' font-size .7rem - font-weight 400 line-height 1rem - margin-top 0 + letter-spacing 1px - li.post:before - content "" + h3 + margin-top 0 + font-size .7rem + font-weight 200 + letter-spacing .1rem + text-transform uppercase + h3::before + content '\2014' + margin-right .3rem + + .list li + margin-bottom 0 + font-size 1.2rem -// #nav #sibs -// width 11rem -// & > div -// 
margin-bottom .6rem -// height auto -// margin-bottom 4px -// height 20px -// a -// white-space normal -// border none -// white-space nowrap -// border-bottom 1px solid #000 - -.date - font-weight 400 - font-family 'scp' - font-size .7rem + p + font-size 1.2rem + line-height 2.2rem + + li p + display inline + + p.ib + margin-top 0 @media only screen and (min-width: 320px) and (max-width: 1024px) - #cont - top 2rem - - h1.lead - font-size 1.6rem - line-height 2rem - margin-bottom 3rem \ No newline at end of file + div.post + h2 + h3 + font-size .5rem + p + font-size 1rem + line-height 2rem + + li h1 + font-weight 400 + li::before + font-weight 200 \ No newline at end of file diff --git a/pub/tree/src/js/components/AnchorComponent.coffee b/pub/tree/src/js/components/AnchorComponent.coffee index 1622b8bc7b..8748dda07d 100644 --- a/pub/tree/src/js/components/AnchorComponent.coffee +++ b/pub/tree/src/js/components/AnchorComponent.coffee @@ -17,19 +17,17 @@ Links = React.createFactory query { head:'r' meta:'j' }, (recl - # {curr:'t',prev:'t,next:'t',onClick:'f'} displayName: "Links" - render: -> div {className:'links'}, @props.children, @_render() - _render: -> - sorted = true - keys = [] - for k,v of @props.kids - if not v.meta?.sort? then sorted = false - keys[Number(v.meta?.sort)] = k - if sorted isnt true - keys = _.keys(@props.kids).sort() - else - keys = _.values keys + render: -> div {className:'links'}, + @props.children, + @renderUp(), + @renderSibs(), + @renderArrows() + renderUp: -> + if @props.sein + div {id:"up",key:"up"}, @renderArrow "up", @props.sein + renderSibs: -> + keys = window.tree.util.getKeys @props.kids if keys.indexOf(@props.curr) isnt -1 style = {marginTop: -24 * (keys.indexOf @props.curr) + "px"} div {id:"sibs",style}, keys.map (key) => @@ -40,6 +38,27 @@ Links = React.createFactory query { head ||= key className = clas active: key is @props.curr (div {className,key}, (a {href,onClick:@props.onClick}, head)) + renderArrow: (name, path) -> + href = window.tree.basepath path + (a {href,key:"arow-#{name}",className:"arow-#{name}"},"") + renderArrows: -> + keys = window.tree.util.getKeys @props.kids + if keys.length > 1 + index = keys.indexOf(@props.curr) + prev = index-1 + next = index+1 + if prev < 0 then prev = keys.length-1 + if next is keys.length then next = 0 + prev = keys[prev] + next = keys[next] + if @props.sein + sein = @props.sein + if sein is "/" then sein = "" + if prev or next then _.filter [ + div {id:"sides",key:"sides"}, + if prev then @renderArrow "prev", "#{sein}/#{prev}" + if next then @renderArrow "next", "#{sein}/#{next}" + ] toText: (elem)-> reactify.walk elem, ()->'' @@ -50,8 +69,13 @@ Links = React.createFactory query { render: -> div {className:'links'}, @props.children, @_render() _render: -> div {id:"sibs"}, div {className:"active"}, a {}, @props.curr -CLICK = 'a,h1,h2,h3,h4,h5,h6' -module.exports = query {sein:'t',path:'t',name:'t',next:'t',prev:'t',meta:'j'},recl +CLICK = 'a' +module.exports = query { + sein:'t' + path:'t' + name:'t' + meta:'j' + },(recl displayName: "Anchor" getInitialState: -> url: window.location.pathname @@ -69,24 +93,21 @@ module.exports = query {sein:'t',path:'t',name:'t',next:'t',prev:'t',meta:'j'},r @setTitle() @interval = setInterval @checkURL,100 - $('body').on 'keyup', (e) => - switch e.keyCode - when 37 then @goTo @props.prev # left - when 39 then @goTo @props.next # right + # $('body').on 'keyup', (e) => + # switch e.keyCode + # when 37 then @goTo @props.prev # left + # when 39 then @goTo @props.next # right 
_this = @ $('body').on 'click', CLICK, (e) -> href = $(@).attr('href') id = $(@).attr('id') - if href?[0] is "/" + if href and not /^https?:\/\//i.test(href) e.preventDefault() e.stopPropagation() + if href?[0] isnt "/" + href = (document.location.pathname.replace /[^\/]*\/?$/, '') + href _this.goTo window.tree.fragpath href - else - e.preventDefault() - e.stopPropagation() - base = window.tree.fragpath(document.location.pathname) - _this.goTo base+"/#{href}" if id window.location.hash = id @@ -106,19 +127,23 @@ module.exports = query {sein:'t',path:'t',name:'t',next:'t',prev:'t',meta:'j'},r TreeActions.setCurr next React.render (BodyComponent {}, ""),$('#cont')[0] + reset: -> + $("html,body").animate {scrollTop:0} + # $("#cont").attr 'class','' + $('#nav').attr 'style','' + $('#nav').removeClass 'scrolling m-up' + $('#nav').addClass 'm-down m-fixed' + goTo: (path) -> @toggleFocus false - $("html,body").animate {scrollTop:0} + @reset() @setPath path checkURL: -> if @state.url isnt window.location.pathname + @reset() @setPath (window.tree.fragpath window.location.pathname),false @setState url: window.location.pathname - - renderArrow: (name, path) -> - href = window.tree.basepath path - (a {href,key:"arow-#{name}",className:"arow-#{name}"},"") render: -> if @props.meta.anchor is 'none' @@ -133,10 +158,5 @@ module.exports = query {sein:'t',path:'t',name:'t',next:'t',prev:'t',meta:'j'},r @onClick curr:@props.name dataPath:@props.sein - }, if @props.sein then _.filter [ - div {id:"up",key:"up"}, @renderArrow "up", @props.sein - if @props.prev or @props.next then _.filter [ - div {id:"sides",key:"sides"}, - if @props.prev then @renderArrow "prev", @props.prev - if @props.next then @renderArrow "next", @props.next - ] ] + sein:@props.sein + }), div diff --git a/pub/tree/src/js/components/BodyComponent.coffee b/pub/tree/src/js/components/BodyComponent.coffee index dfbdd5260e..ed6fe473b7 100644 --- a/pub/tree/src/js/components/BodyComponent.coffee +++ b/pub/tree/src/js/components/BodyComponent.coffee @@ -1,18 +1,63 @@ +clas = require 'classnames' + +logo = require './Logo.coffee' query = require './Async.coffee' reactify = require './Reactify.coffee' recl = React.createClass -{div} = React.DOM +{div,p,img,a} = React.DOM -module.exports = query {body:'r',path:'t',meta:'j'}, recl +Logo = React.createFactory recl render: -> + {color} = @props + if color is "white" or color is "black" # else? + src = "//storage.googleapis.com/urbit-extra/logo/logo-#{color}-100x100.png" + (img {src,className:"logo"}) + +Next = React.createFactory query { + path:'t' + kids: + name:'t' + head:'r' + meta:'j' + }, (recl + displayName: "Links" + render: -> + curr = @props.kids[@props.curr] + if curr?.meta?.next + keys = window.tree.util.getKeys @props.kids + if keys.length > 1 + index = keys.indexOf(@props.curr) + next = index+1 + if next is keys.length then next = 0 + next = keys[next] + next = @props.kids[next] + + (div {className:"link-next"}, [ + (a {href:"#{@props.path}/#{next.name}"}, "Next: #{next.meta.title}") + ]) + ) + +module.exports = query { + body:'r' + name:'t' + path:'t' + meta:'j' + sein:'t' +}, recl displayName: "Body" render: -> - $("#cont").attr 'class','' - if @props.meta.layout - $("#cont").attr 'class',@props.meta.layout.replace /,/g," " - (div {}, - (div { - id:'body', - key:"body"+@props.path}, - reactify @props.body) - ) + className = (@props.meta.layout?.replace /,/g," ") || "" + body = [reactify @props.body] + if @props.meta.logo? 
+ body.unshift (Logo color:@props.meta.logo) + if @props.meta.next? + body.push Next {dataPath:@props.sein,curr:@props.name} + if @props.meta.footer? + body.push (div {className:"footer"}, [ + (p {}, "This page was served by Urbit.")]) + (div { + id:'body', + key:"body"+@props.path + className + }, + body) diff --git a/pub/tree/src/js/components/EmailComponent.coffee b/pub/tree/src/js/components/EmailComponent.coffee index f212fb10bb..8eb1cf39fb 100644 --- a/pub/tree/src/js/components/EmailComponent.coffee +++ b/pub/tree/src/js/components/EmailComponent.coffee @@ -38,7 +38,7 @@ module.exports = recl if @state.submit is false cont = [ (input {key:"field",className:"email",placeholder:"your@email.com",@onKeyUp}, @state.email) - (button {key:"submit",className:"submit",@onClick}, "Submit") + (button {key:"submit",className:"submit",@onClick}, "Sign up") ] else cont = [(div {className:"submitted"},"Got it. Thanks!")] diff --git a/pub/tree/src/js/components/ListComponent.coffee b/pub/tree/src/js/components/ListComponent.coffee index 12c85770f5..a0541fa4c1 100644 --- a/pub/tree/src/js/components/ListComponent.coffee +++ b/pub/tree/src/js/components/ListComponent.coffee @@ -43,6 +43,7 @@ module.exports = query { for item in _.values _keys path = @props.path+"/"+item elem = @props.kids[item] + if elem.meta.hide? then continue href = window.tree.basepath path if elem.meta.link then href = elem.meta.link parts = [] @@ -51,7 +52,7 @@ module.exports = query { title = gn: 'h1' c: [elem.meta.title] - if elem.head.c.length > 0 + if not title && elem.head.c.length > 0 title = elem.head if not title title = diff --git a/pub/tree/src/js/main.coffee b/pub/tree/src/js/main.coffee index b3a27a2451..33476892ae 100644 --- a/pub/tree/src/js/main.coffee +++ b/pub/tree/src/js/main.coffee @@ -13,11 +13,15 @@ $ -> window.tree._basepath += (window.location.pathname.replace window.tree._basepath, "").split("/")[0] window.tree.basepath = (path) -> + prefix = window.tree._basepath + if prefix is "/" then prefix = "" if path[0] isnt "/" then path = "/"+path - _path = window.tree._basepath + path + _path = prefix + path if _path.slice(-1) is "/" then _path = _path.slice(0,-1) _path - window.tree.fragpath = (path) -> path.replace window.tree._basepath,"" + window.tree.fragpath = (path) -> + path.replace(/\/$/,'') + .replace(window.tree._basepath,"") TreeActions = require './actions/TreeActions.coffee' TreePersistence = require './persistence/TreePersistence.coffee' @@ -30,6 +34,19 @@ $ -> rend (head {}, ""),$('#nav')[0] rend (body {}, ""),$('#cont')[0] + window.tree.util = + getKeys: (kids) -> + sorted = true + keys = [] + for k,v of kids + continue if v.meta?.hide + if not v.meta?.sort? 
then sorted = false + keys[Number(v.meta?.sort)] = k + if sorted isnt true + keys = _.keys(kids).sort() + else + keys = _.values keys + checkScroll = -> if $(window).scrollTop() > 20 $('#nav').addClass 'scrolling' @@ -47,11 +64,13 @@ $ -> if po.lm isnt null and po.cm isnt null po.cs = $(window).scrollTop() + db = $(window).height()-(po.cs+window.innerHeight) + ds = Math.abs po.cs-po.ls dx = Math.abs po.cm.x-po.lm.x dy = Math.abs po.cm.y-po.lm.y - $('#nav').toggleClass 'moving',(dx > 20 or dy > 20) + $('#nav').toggleClass 'moving',(dx > 20 or dy > 20 or db < 180) po.lm = po.cm po.ls = po.cs setInterval checkMove,200 diff --git a/pub/tree/src/js/main.js b/pub/tree/src/js/main.js index 23db46da8a..3998cdd6f5 100644 --- a/pub/tree/src/js/main.js +++ b/pub/tree/src/js/main.js @@ -35,7 +35,8 @@ module.exports = { }; -},{"../dispatcher/Dispatcher.coffee":14,"../persistence/TreePersistence.coffee":20}],2:[function(require,module,exports){ + +},{"../dispatcher/Dispatcher.coffee":15,"../persistence/TreePersistence.coffee":21}],2:[function(require,module,exports){ var BodyComponent, CLICK, Links, TreeActions, TreeStore, a, clas, div, query, reactify, recl, ref; clas = require('classnames'); @@ -66,25 +67,19 @@ Links = React.createFactory(query({ render: function() { return div({ className: 'links' - }, this.props.children, this._render()); + }, this.props.children, this.renderUp(), this.renderSibs(), this.renderArrows()); }, - _render: function() { - var k, keys, ref1, ref2, ref3, sorted, style, v; - sorted = true; - keys = []; - ref1 = this.props.kids; - for (k in ref1) { - v = ref1[k]; - if (((ref2 = v.meta) != null ? ref2.sort : void 0) == null) { - sorted = false; - } - keys[Number((ref3 = v.meta) != null ? ref3.sort : void 0)] = k; - } - if (sorted !== true) { - keys = _.keys(this.props.kids).sort(); - } else { - keys = _.values(keys); + renderUp: function() { + if (this.props.sein) { + return div({ + id: "up", + key: "up" + }, this.renderArrow("up", this.props.sein)); } + }, + renderSibs: function() { + var keys, style; + keys = window.tree.util.getKeys(this.props.kids); if (keys.indexOf(this.props.curr) !== -1) { style = { marginTop: -24 * (keys.indexOf(this.props.curr)) + "px" @@ -118,6 +113,46 @@ Links = React.createFactory(query({ }; })(this))); }, + renderArrow: function(name, path) { + var href; + href = window.tree.basepath(path); + return a({ + href: href, + key: "arow-" + name, + className: "arow-" + name + }, ""); + }, + renderArrows: function() { + var index, keys, next, prev, sein; + keys = window.tree.util.getKeys(this.props.kids); + if (keys.length > 1) { + index = keys.indexOf(this.props.curr); + prev = index - 1; + next = index + 1; + if (prev < 0) { + prev = keys.length - 1; + } + if (next === keys.length) { + next = 0; + } + prev = keys[prev]; + next = keys[next]; + } + if (this.props.sein) { + sein = this.props.sein; + if (sein === "/") { + sein = ""; + } + if (prev || next) { + return _.filter([ + div({ + id: "sides", + key: "sides" + }, prev ? this.renderArrow("prev", sein + "/" + prev) : void 0, next ? 
this.renderArrow("next", sein + "/" + next) : void 0) + ]); + } + } + }, toText: function(elem) { return reactify.walk(elem, function() { return ''; @@ -145,14 +180,12 @@ Links = React.createFactory(query({ } }))); -CLICK = 'a,h1,h2,h3,h4,h5,h6'; +CLICK = 'a'; module.exports = query({ sein: 't', path: 't', name: 't', - next: 't', - prev: 't', meta: 'j' }, recl({ displayName: "Anchor", @@ -191,30 +224,18 @@ module.exports = query({ var _this; this.setTitle(); this.interval = setInterval(this.checkURL, 100); - $('body').on('keyup', (function(_this) { - return function(e) { - switch (e.keyCode) { - case 37: - return _this.goTo(_this.props.prev); - case 39: - return _this.goTo(_this.props.next); - } - }; - })(this)); _this = this; return $('body').on('click', CLICK, function(e) { - var base, href, id; + var href, id; href = $(this).attr('href'); id = $(this).attr('id'); - if ((href != null ? href[0] : void 0) === "/") { + if (href && !/^https?:\/\//i.test(href)) { e.preventDefault(); e.stopPropagation(); + if ((href != null ? href[0] : void 0) !== "/") { + href = (document.location.pathname.replace(/[^\/]*\/?$/, '')) + href; + } _this.goTo(window.tree.fragpath(href)); - } else { - e.preventDefault(); - e.stopPropagation(); - base = window.tree.fragpath(document.location.pathname); - _this.goTo(base + ("/" + href)); } if (id) { return window.location.hash = id; @@ -243,30 +264,28 @@ module.exports = query({ return React.render(BodyComponent({}, ""), $('#cont')[0]); } }, - goTo: function(path) { - this.toggleFocus(false); + reset: function() { $("html,body").animate({ scrollTop: 0 }); + $('#nav').attr('style', ''); + $('#nav').removeClass('scrolling m-up'); + return $('#nav').addClass('m-down m-fixed'); + }, + goTo: function(path) { + this.toggleFocus(false); + this.reset(); return this.setPath(path); }, checkURL: function() { if (this.state.url !== window.location.pathname) { + this.reset(); this.setPath(window.tree.fragpath(window.location.pathname), false); return this.setState({ url: window.location.pathname }); } }, - renderArrow: function(name, path) { - var href; - href = window.tree.basepath(path); - return a({ - href: href, - key: "arow-" + name, - className: "arow-" + name - }, ""); - }, render: function() { var obj; if (this.props.meta.anchor === 'none') { @@ -286,23 +305,15 @@ module.exports = query({ return div(obj, Links({ onClick: this.onClick, curr: this.props.name, - dataPath: this.props.sein - }, this.props.sein ? _.filter([ - div({ - id: "up", - key: "up" - }, this.renderArrow("up", this.props.sein)), this.props.prev || this.props.next ? _.filter([ - div({ - id: "sides", - key: "sides" - }, this.props.prev ? this.renderArrow("prev", this.props.prev) : void 0, this.props.next ? 
this.renderArrow("next", this.props.next) : void 0) - ]) : void 0 - ]) : void 0)); + dataPath: this.props.sein, + sein: this.props.sein + })); } -})); +}), div); -},{"../actions/TreeActions.coffee":1,"../stores/TreeStore.coffee":21,"./Async.coffee":3,"./BodyComponent.coffee":4,"./Reactify.coffee":11,"classnames":16}],3:[function(require,module,exports){ + +},{"../actions/TreeActions.coffee":1,"../stores/TreeStore.coffee":22,"./Async.coffee":3,"./BodyComponent.coffee":4,"./Reactify.coffee":12,"classnames":17}],3:[function(require,module,exports){ var TreeActions, TreeStore, _load, code, div, recl, ref, span; _load = require('./LoadComponent.coffee'); @@ -412,8 +423,13 @@ module.exports = function(queries, Child, load) { }; -},{"../actions/TreeActions.coffee":1,"../stores/TreeStore.coffee":21,"./LoadComponent.coffee":10}],4:[function(require,module,exports){ -var div, query, reactify, recl; + +},{"../actions/TreeActions.coffee":1,"../stores/TreeStore.coffee":22,"./LoadComponent.coffee":10}],4:[function(require,module,exports){ +var Logo, Next, a, clas, div, img, logo, p, query, reactify, recl, ref; + +clas = require('classnames'); + +logo = require('./Logo.coffee'); query = require('./Async.coffee'); @@ -421,28 +437,95 @@ reactify = require('./Reactify.coffee'); recl = React.createClass; -div = React.DOM.div; +ref = React.DOM, div = ref.div, p = ref.p, img = ref.img, a = ref.a; + +Logo = React.createFactory(recl({ + render: function() { + var color, src; + color = this.props.color; + if (color === "white" || color === "black") { + src = "//storage.googleapis.com/urbit-extra/logo/logo-" + color + "-100x100.png"; + } + return img({ + src: src, + className: "logo" + }); + } +})); + +Next = React.createFactory(query({ + path: 't', + kids: { + name: 't', + head: 'r', + meta: 'j' + } +}, recl({ + displayName: "Links", + render: function() { + var curr, index, keys, next, ref1; + curr = this.props.kids[this.props.curr]; + if (curr != null ? (ref1 = curr.meta) != null ? ref1.next : void 0 : void 0) { + keys = window.tree.util.getKeys(this.props.kids); + if (keys.length > 1) { + index = keys.indexOf(this.props.curr); + next = index + 1; + if (next === keys.length) { + next = 0; + } + next = keys[next]; + next = this.props.kids[next]; + return div({ + className: "link-next" + }, [ + a({ + href: this.props.path + "/" + next.name + }, "Next: " + next.meta.title) + ]); + } + } + } +}))); module.exports = query({ body: 'r', + name: 't', path: 't', - meta: 'j' + meta: 'j', + sein: 't' }, recl({ displayName: "Body", render: function() { - $("#cont").attr('class', ''); - if (this.props.meta.layout) { - $("#cont").attr('class', this.props.meta.layout.replace(/,/g, " ")); + var body, className, ref1; + className = ((ref1 = this.props.meta.layout) != null ? 
ref1.replace(/,/g, " ") : void 0) || ""; + body = [reactify(this.props.body)]; + if (this.props.meta.logo != null) { + body.unshift(Logo({ + color: this.props.meta.logo + })); } - return div({}, div({ + if (this.props.meta.next != null) { + body.push(Next({ + dataPath: this.props.sein, + curr: this.props.name + })); + } + if (this.props.meta.footer != null) { + body.push(div({ + className: "footer" + }, [p({}, "This page was served by Urbit.")])); + } + return div({ id: 'body', - key: "body" + this.props.path - }, reactify(this.props.body))); + key: "body" + this.props.path, + className: className + }, body); } })); -},{"./Async.coffee":3,"./Reactify.coffee":11}],5:[function(require,module,exports){ + +},{"./Async.coffee":3,"./Logo.coffee":11,"./Reactify.coffee":12,"classnames":17}],5:[function(require,module,exports){ var div, recl, ref, textarea; recl = React.createClass; @@ -465,6 +548,7 @@ module.exports = recl({ }); + },{}],6:[function(require,module,exports){ var div, recl; @@ -487,7 +571,8 @@ module.exports = { }; -},{"./CodeMirror.coffee":5,"./EmailComponent.coffee":7,"./KidsComponent.coffee":8,"./ListComponent.coffee":9,"./SearchComponent.coffee":12,"./TocComponent.coffee":13}],7:[function(require,module,exports){ + +},{"./CodeMirror.coffee":5,"./EmailComponent.coffee":7,"./KidsComponent.coffee":8,"./ListComponent.coffee":9,"./SearchComponent.coffee":13,"./TocComponent.coffee":14}],7:[function(require,module,exports){ var button, div, input, p, reactify, recl, ref; reactify = require('./Reactify.coffee'); @@ -551,7 +636,7 @@ module.exports = recl({ key: "submit", className: "submit", onClick: this.onClick - }, "Submit") + }, "Sign up") ]; } else { cont = [ @@ -567,7 +652,8 @@ module.exports = recl({ }); -},{"./Reactify.coffee":11}],8:[function(require,module,exports){ + +},{"./Reactify.coffee":12}],8:[function(require,module,exports){ var a, div, hr, li, query, reactify, recl, ref, ul; reactify = require('./Reactify.coffee'); @@ -638,7 +724,8 @@ module.exports = query({ })); -},{"./Async.coffee":3,"./Reactify.coffee":11}],9:[function(require,module,exports){ + +},{"./Async.coffee":3,"./Reactify.coffee":12}],9:[function(require,module,exports){ var a, clas, div, h1, li, query, reactify, recl, ref, ul; clas = require('classnames'); @@ -709,6 +796,9 @@ module.exports = query({ item = ref5[i]; path = this.props.path + "/" + item; elem = this.props.kids[item]; + if (elem.meta.hide != null) { + continue; + } href = window.tree.basepath(path); if (elem.meta.link) { href = elem.meta.link; @@ -721,7 +811,7 @@ module.exports = query({ c: [elem.meta.title] }; } - if (elem.head.c.length > 0) { + if (!title && elem.head.c.length > 0) { title = elem.head; } if (!title) { @@ -782,7 +872,8 @@ module.exports = query({ })); -},{"./Async.coffee":3,"./Reactify.coffee":11,"classnames":16}],10:[function(require,module,exports){ + +},{"./Async.coffee":3,"./Reactify.coffee":12,"classnames":17}],10:[function(require,module,exports){ var div, input, recl, ref, textarea; recl = React.createClass; @@ -822,7 +913,13 @@ module.exports = recl({ }); + },{}],11:[function(require,module,exports){ + + + + +},{}],12:[function(require,module,exports){ var Virtual, div, load, reactify, recl, ref, rele, span, walk; recl = React.createClass; @@ -889,7 +986,8 @@ module.exports = _.extend(reactify, { }); -},{"./LoadComponent.coffee":10}],12:[function(require,module,exports){ + +},{"./LoadComponent.coffee":10}],13:[function(require,module,exports){ var a, div, input, query, reactify, recl, ref, slice = [].slice; @@ -1027,7 
+1125,8 @@ module.exports = query({ })); -},{"./Async.coffee":3,"./Reactify.coffee":11}],13:[function(require,module,exports){ + +},{"./Async.coffee":3,"./Reactify.coffee":12}],14:[function(require,module,exports){ var div, query, reactify, recl, slice = [].slice; @@ -1155,7 +1254,8 @@ module.exports = query({ })); -},{"./Async.coffee":3,"./Reactify.coffee":11}],14:[function(require,module,exports){ + +},{"./Async.coffee":3,"./Reactify.coffee":12}],15:[function(require,module,exports){ var Dispatcher; Dispatcher = require('flux').Dispatcher; @@ -1176,7 +1276,8 @@ module.exports = _.extend(new Dispatcher(), { }); -},{"flux":17}],15:[function(require,module,exports){ + +},{"flux":18}],16:[function(require,module,exports){ var rend; rend = React.render; @@ -1191,18 +1292,22 @@ $(function() { window.tree._basepath = window.urb.util.basepath("/"); window.tree._basepath += (window.location.pathname.replace(window.tree._basepath, "")).split("/")[0]; window.tree.basepath = function(path) { - var _path; + var _path, prefix; + prefix = window.tree._basepath; + if (prefix === "/") { + prefix = ""; + } if (path[0] !== "/") { path = "/" + path; } - _path = window.tree._basepath + path; + _path = prefix + path; if (_path.slice(-1) === "/") { _path = _path.slice(0, -1); } return _path; }; window.tree.fragpath = function(path) { - return path.replace(window.tree._basepath, ""); + return path.replace(/\/$/, '').replace(window.tree._basepath, ""); }; TreeActions = require('./actions/TreeActions.coffee'); TreePersistence = require('./persistence/TreePersistence.coffee'); @@ -1211,6 +1316,28 @@ $(function() { TreeActions.loadPath(frag, window.tree.body, window.tree.kids); rend(head({}, ""), $('#nav')[0]); rend(body({}, ""), $('#cont')[0]); + window.tree.util = { + getKeys: function(kids) { + var k, keys, ref, ref1, ref2, sorted, v; + sorted = true; + keys = []; + for (k in kids) { + v = kids[k]; + if ((ref = v.meta) != null ? ref.hide : void 0) { + continue; + } + if (((ref1 = v.meta) != null ? ref1.sort : void 0) == null) { + sorted = false; + } + keys[Number((ref2 = v.meta) != null ? ref2.sort : void 0)] = k; + } + if (sorted !== true) { + return keys = _.keys(kids).sort(); + } else { + return keys = _.values(keys); + } + } + }; checkScroll = function() { if ($(window).scrollTop() > 20) { return $('#nav').addClass('scrolling'); @@ -1231,13 +1358,14 @@ $(function() { }; }); checkMove = function() { - var ds, dx, dy; + var db, ds, dx, dy; if (po.lm !== null && po.cm !== null) { po.cs = $(window).scrollTop(); + db = $(window).height() - (po.cs + window.innerHeight); ds = Math.abs(po.cs - po.ls); dx = Math.abs(po.cm.x - po.lm.x); dy = Math.abs(po.cm.y - po.lm.y); - $('#nav').toggleClass('moving', dx > 20 || dy > 20); + $('#nav').toggleClass('moving', dx > 20 || dy > 20 || db < 180); } po.lm = po.cm; return po.ls = po.cs; @@ -1322,7 +1450,8 @@ $(function() { }); -},{"./actions/TreeActions.coffee":1,"./components/AnchorComponent.coffee":2,"./components/BodyComponent.coffee":4,"./components/Components.coffee":6,"./persistence/TreePersistence.coffee":20}],16:[function(require,module,exports){ + +},{"./actions/TreeActions.coffee":1,"./components/AnchorComponent.coffee":2,"./components/BodyComponent.coffee":4,"./components/Components.coffee":6,"./persistence/TreePersistence.coffee":21}],17:[function(require,module,exports){ /*! Copyright (c) 2015 Jed Watson. 
Licensed under the MIT License (MIT), see @@ -1372,7 +1501,7 @@ $(function() { } }()); -},{}],17:[function(require,module,exports){ +},{}],18:[function(require,module,exports){ /** * Copyright (c) 2014-2015, Facebook, Inc. * All rights reserved. @@ -1384,7 +1513,7 @@ $(function() { module.exports.Dispatcher = require('./lib/Dispatcher') -},{"./lib/Dispatcher":18}],18:[function(require,module,exports){ +},{"./lib/Dispatcher":19}],19:[function(require,module,exports){ /* * Copyright (c) 2014, Facebook, Inc. * All rights reserved. @@ -1636,7 +1765,7 @@ var _prefix = 'ID_'; module.exports = Dispatcher; -},{"./invariant":19}],19:[function(require,module,exports){ +},{"./invariant":20}],20:[function(require,module,exports){ /** * Copyright (c) 2014, Facebook, Inc. * All rights reserved. @@ -1691,7 +1820,7 @@ var invariant = function(condition, format, a, b, c, d, e, f) { module.exports = invariant; -},{}],20:[function(require,module,exports){ +},{}],21:[function(require,module,exports){ var dedup; dedup = {}; @@ -1749,7 +1878,8 @@ module.exports = { }; -},{}],21:[function(require,module,exports){ + +},{}],22:[function(require,module,exports){ var EventEmitter, MessageDispatcher, QUERIES, TreeStore, _curr, _data, _tree, clog; EventEmitter = require('events').EventEmitter; @@ -1786,6 +1916,9 @@ TreeStore = _.extend(EventEmitter.prototype, { return _path.split("/"); }, fulfill: function(path, query) { + if (path === "/") { + path = ""; + } return this.fulfillAt(this.getTree(path.split('/')), path, query); }, fulfillAt: function(tree, path, query) { @@ -1971,7 +2104,8 @@ TreeStore.dispatchToken = MessageDispatcher.register(function(payload) { module.exports = TreeStore; -},{"../dispatcher/Dispatcher.coffee":14,"events":22}],22:[function(require,module,exports){ + +},{"../dispatcher/Dispatcher.coffee":15,"events":23}],23:[function(require,module,exports){ // Copyright Joyent, Inc. and other Node contributors. // // Permission is hereby granted, free of charge, to any person obtaining a @@ -2274,4 +2408,4 @@ function isUndefined(arg) { return arg === void 0; } -},{}]},{},[15]); +},{}]},{},[16]); diff --git a/pub/tree/src/js/stores/TreeStore.coffee b/pub/tree/src/js/stores/TreeStore.coffee index f98553fb65..2623093045 100644 --- a/pub/tree/src/js/stores/TreeStore.coffee +++ b/pub/tree/src/js/stores/TreeStore.coffee @@ -19,6 +19,7 @@ TreeStore = _.extend EventEmitter.prototype, { pathToArr: (_path) -> _path.split "/" fulfill: (path,query) -> + if path is "/" then path = "" @fulfillAt (@getTree path.split '/'),path,query fulfillAt: (tree,path,query)-> data = @fulfillLocal path, query
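The recurring change in this diff is root-path normalization: `urb.js`'s `basepath`, the bundled `window.tree.basepath`/`fragpath`, and `TreeStore.fulfill` all collapse a bare `"/"` prefix to the empty string so that pages served from the site root do not produce `//`-prefixed or trailing-slash paths. A minimal standalone sketch of that normalization, written against the same conventions as the bundle above (the free-standing `basepath` helper and its second argument are illustrative, not part of the actual API):

```js
// Sketch only: mirrors the prefix/slash handling added to window.tree.basepath,
// with the base prefix passed in explicitly instead of read from window.tree._basepath.
function basepath(path, prefix) {
  prefix = prefix || "";
  if (prefix === "/") prefix = "";          // served from root: drop the prefix entirely
  if (path[0] !== "/") path = "/" + path;   // ensure exactly one leading slash
  var _path = prefix + path;
  if (_path.slice(-1) === "/") _path = _path.slice(0, -1);  // strip any trailing slash
  return _path;
}

// basepath("docs", "/")       -> "/docs"   (no double slash at the root)
// basepath("/docs/", "/pub")  -> "/pub/docs"
```

The CoffeeScript hunk at the end applies the same idea on the store side: `fulfill` rewrites a `"/"` path to `""` before splitting on `"/"`, so the root request resolves to the tree's top node rather than to an empty path segment.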