bazel-based continuous integration flow (#21)

Matthew LeVan 2023-01-09 14:02:35 -05:00 committed by Peter McEvoy
parent 78fdc2edd4
commit cadf27fb85
29 changed files with 669 additions and 974 deletions

View File

@ -9,10 +9,17 @@ build --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
# https://github.com/bazelbuild/bazel/issues/7260.
build --incompatible_enable_cc_toolchain_resolution
# Disable transitions.
# See https://github.com/bazelbuild/rules_docker/issues/2052.
build --@io_bazel_rules_docker//transitions:enable=false
# Add aliases for compiler version build settings.
build --flag_alias=aarch64_linux_gnu_gcc_version=//:aarch64_linux_gnu_gcc_version
build --flag_alias=clang_version=//:clang_version
build --flag_alias=gcc_version=//:gcc_version
# Add alias for Docker image tag.
build --flag_alias=image_tag=//:image_tag
# Any personal configuration should go in .user.bazelrc.
try-import %workspace%/.user.bazelrc
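
As a quick sketch of what these aliases buy: a build-setting flag like `--//:clang_version` can be spelled as a plain flag. The values below are illustrative.

```sh
# --clang_version=14.0.6 expands to --//:clang_version=14.0.6 via the alias.
bazel build --clang_version=14.0.6 :urbit
# Same idea for the Docker image tag consumed by :upload_docker.
bazel run :upload_docker --image_tag=latest
```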

View File

@ -1,42 +1,15 @@
name: build
on:
push:
paths:
- '.github/workflows/build.yml'
- '.github/workflows/vere.yml'
- 'pkg/arvo/**'
- 'pkg/docker-image/**'
- 'pkg/ent/**'
- 'pkg/ge-additions/**'
- 'pkg/libaes_siv/**'
- 'pkg/urbit/**'
- 'pkg/urcrypt/**'
- 'bin/**'
- 'nix/**'
- 'default.nix'
pull_request:
paths:
- '.github/workflows/build.yml'
- '.github/workflows/vere.yml'
- 'pkg/arvo/**'
- 'pkg/docker-image/**'
- 'pkg/ent/**'
- 'pkg/ge-additions/**'
- 'pkg/libaes_siv/**'
- 'pkg/urbit/**'
- 'pkg/urcrypt/**'
- 'bin/**'
- 'nix/**'
- 'default.nix'
- '.bazelrc'
- '.github/workflows/*.yml'
- 'BUILD.bazel'
- 'WORKSPACE.bazel'
- 'bazel/**'
- 'pkg/**'
jobs:
call-vere:
uses: ./.github/workflows/vere.yml
with:
pace: 'edge' # XX s/b once?
upload: >-
${{
(github.ref_name == 'next/vere' && github.ref_type == 'branch')
}}
secrets: inherit
uses: ./.github/workflows/shared.yml

View File

@ -1,17 +0,0 @@
name: ops-merge
on:
push:
branches:
- 'release/*'
jobs:
merge-release-to-ops:
runs-on: ubuntu-latest
name: "Merge to ops-tlon"
steps:
- uses: actions/checkout@v2
- uses: devmasx/merge-branch@v1.3.1
with:
type: now
target_branch: ops-tlon
github_token: ${{ secrets.JANEWAY_BOT_TOKEN }}

View File

@ -7,8 +7,8 @@ on:
jobs:
call-vere:
uses: ./.github/workflows/vere.yml
uses: ./.github/workflows/shared.yml
with:
pace: 'soon'
upload: true
secrets: inherit
secrets: inherit

View File

@ -1,51 +0,0 @@
name: release-docker
on:
release: null
push:
tags: 'urbit-v[0-9]+.[0-9]+'
jobs:
upload:
strategy:
matrix:
include:
- { os: ubuntu-latest, system: x86_64-linux }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: cachix/install-nix-action@v16
with:
extra_nix_config: |
system-features = nixos-test benchmark big-parallel kvm
- uses: cachix/cachix-action@v10
with:
name: ares
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- uses: docker/docker-login-action@v1.8.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: christian-korneck/update-container-description-action@v1
env:
DOCKER_USER: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASS: ${{ secrets.DOCKERHUB_TOKEN }}
with:
destination_container_repo: ${{ secrets.DOCKERHUB_USERNAME }}/urbit
provider: dockerhub
short_description: 'Urbit: a clean-slate OS and network for the 21st century'
readme_file: 'pkg/docker-image/README.md'
- run: |
version="$(cat ./pkg/urbit/version)"
image="$(nix-build -A docker-image)"
imageName="$(nix-instantiate --eval -A docker-image.imageName | cut -d'"' -f2)"
imageTag="$(nix-instantiate --eval -A docker-image.imageTag | cut -d'"' -f2)"
# Load the image from the nix-built tarball
docker load -i $image
docker tag "$imageName:$imageTag" ${{secrets.DOCKERHUB_USERNAME }}/urbit:v$version
docker tag "$imageName:$imageTag" ${{secrets.DOCKERHUB_USERNAME }}/urbit:latest
docker push ${{secrets.DOCKERHUB_USERNAME }}/urbit:v$version
docker push ${{secrets.DOCKERHUB_USERNAME }}/urbit:latest

View File

@ -7,8 +7,9 @@ on:
jobs:
call-vere:
uses: ./.github/workflows/vere.yml
uses: ./.github/workflows/shared.yml
with:
docker: true
pace: 'live'
upload: true
secrets: inherit

.github/workflows/shared.yml (new file, 182 lines, vendored)
View File

@ -0,0 +1,182 @@
name: shared
on:
workflow_call:
inputs:
docker:
description: 'Build and upload image to Docker Hub'
type: boolean
default: false
required: false
pace:
description: 'Release pace'
type: string
default: 'edge'
required: false
upload:
description: 'Upload binaries to GCP'
type: boolean
default: false
required: false
secrets:
GCP_CREDENTIALS:
required: false
GCP_PROJECT:
required: false
env:
UPLOAD_BASE: bootstrap.urbit.org/vere-test
VERE_PACE: ${{ inputs.pace }}
VERSION_TYPE: ${{ (inputs.pace == 'soon' || inputs.pace == 'live') && 'real' || 'hash' }}
jobs:
urbit:
strategy:
fail-fast: false
matrix:
include:
- { target: linux-arm64, runner: ubuntu-22.04 }
- { target: linux-x86_64, runner: ubuntu-22.04 }
- { target: macos-x86_64, runner: macos-12 }
runs-on: ${{ matrix.runner }}
steps:
#
# BUILD AND TEST
#
- uses: actions/checkout@v3
- name: Set up build cache
uses: actions/cache@v3
with:
key: ${{ matrix.target }}-cache
path: |
# It would be nice to cache the Bazel cache, but the total GitHub
# cache size is 10GB per repository, and the musl libc toolchains
# take much longer to build than anything in the Bazel cache, so we
# only cache the musl libc toolchains.
# # Cache bazel path on Linux.
# ~/.cache/bazel/_bazel_runner
# # Cache bazel path on macOS.
# /private/var/tmp/_bazel_runner
# Cache musl libc toolchains.
/usr/local/*-musl
- name: Build and test
run: |
# Build the binary.
echo "${{ inputs.pace }}" > ./pkg/vere/PACE
params=()
case "${{ matrix.target }}" in
"linux-arm64")
bazel run //bazel/toolchain:aarch64-linux-musl-gcc
params+=(
--platforms=//:linux-arm64
)
;;
"linux-x86_64")
bazel run //bazel/toolchain:x86_64-linux-musl-gcc
;;
"macos-x86_64")
# Switch Xcode path to match the path specified in our bazel toolchain.
sudo xcode-select --switch /Library/Developer/CommandLineTools
brew install automake libtool
brew_clang="//bazel/toolchain:brew-clang-macos-x86_64-toolchain"
params+=(
--clang_version=14.0.6
--extra_toolchains=$brew_clang
)
;;
*)
echo "Unsupported target: ${{ matrix.target }}"
exit 1
;;
esac
bazel build "${params[@]}" :urbit
# Prepare binary for upload to GCP in future step.
echo "$GITHUB_WORKSPACE/bazel-bin/pkg/vere/urbit"
echo "urbit_static=$GITHUB_WORKSPACE/bazel-bin/pkg/vere/urbit" >> $GITHUB_ENV
# We have no way of running the linux-arm64 tests on a linux-x86_64
# machine.
if [[ "${{ matrix.target }}" != "linux-arm64" ]]; then
params+=(
--build_tests_only
)
bazel test ... "${params[@]}"
fi
# Test the binary and run fakeship tests.
# See https://github.com/urbit/vere/issues/15.
# See https://github.com/urbit/vere/issues/40.
if [[ "${{ matrix.target }}" == "linux-x86_64" ]]; then
bazel build //pkg/vere:test-fake-ship
fi
#
# UPLOAD TO GCP
#
- uses: google-github-actions/auth@v1
if: inputs.upload
with:
credentials_json: ${{ secrets.GCP_CREDENTIALS }}
- uses: google-github-actions/setup-gcloud@v1
if: inputs.upload
with:
project_id: ${{ secrets.GCP_PROJECT }}
- name: Upload binaries to bootstrap.urbit.org
if: inputs.upload
run: |
if [ "real" == "$VERSION_TYPE" ]; then
version="$(cat ./pkg/vere/VERSION)"
else
version="${GITHUB_SHA:0:9}"
fi
os=$(echo "${{ matrix.target }}" | cut -d'-' -f1)
arch=$(echo "${{ matrix.target }}" | cut -d'-' -f2)
system=${arch}-${os}
target="gs://${UPLOAD_BASE}/${VERE_PACE}/${version}/vere-v${version}-${system}"
gsutil cp -n "${{ env.urbit_static }}" "$target"
exitcode=$?
[ $exitcode -eq 0 ] &&
echo "upload to $target complete." ||
echo "upload to $target failed.";
exit $exitcode
#
# DOCKER
#
- uses: docker/docker-login-action@v1.8.0
if: inputs.docker
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: christian-korneck/update-container-description-action@v1
if: inputs.docker
env:
DOCKER_USER: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASS: ${{ secrets.DOCKERHUB_TOKEN }}
with:
destination_container_repo: ${{ secrets.DOCKERHUB_USERNAME }}/urbit
provider: dockerhub
short_description: 'Urbit: a clean-slate OS and network for the 21st century'
readme_file: 'DOCKER.md'
- name: Build and push Docker image
if: inputs.docker && matrix.target == 'linux-x86_64'
run: |
if [ "real" == "$VERSION_TYPE" ]; then
version="$(cat ./pkg/vere/VERSION)"
else
version="${GITHUB_SHA:0:9}"
fi
bazel run :upload_docker --image_tag=v$version
bazel run :upload_docker --image_tag=latest
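
The steps above can be replayed outside Actions when debugging; a minimal sketch of the linux-x86_64 leg, using only commands from the workflow:

```sh
echo 'edge' > ./pkg/vere/PACE                      # choose a release pace
bazel run //bazel/toolchain:x86_64-linux-musl-gcc  # one-time musl toolchain bootstrap
bazel build :urbit                                 # static binary
bazel test --build_tests_only ...                  # unit tests
bazel build //pkg/vere:test-fake-ship              # fake-ship tests
```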

View File

@ -1,44 +0,0 @@
name: tarballs
on:
release: null
push:
tags: ['*']
jobs:
upload:
strategy:
matrix:
include:
- { os: ubuntu-latest, system: x86_64-linux }
- { os: macos-latest, system: x86_64-darwin }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: cachix/install-nix-action@v16
- uses: cachix/cachix-action@v10
with:
name: ${{ secrets.CACHIX_NAME }}
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
- uses: google-github-actions/setup-gcloud@v0.2.0
with:
version: '290.0.1'
service_account_key: ${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}
project_id: ${{ secrets.GCS_PROJECT }}
export_default_credentials: true
- run: nix-build -A tarball --arg enableStatic true
- name: Run upload to bootstrap.urbit.org
run: |
version="$(cat ./pkg/urbit/version)"
system="$(nix-instantiate --eval --expr 'builtins.currentSystem')"
system=${system:1:${#system}-2}
target="gs://bootstrap.urbit.org/ci/urbit-v${version}-${system}-${GITHUB_SHA:0:9}.tgz"
gsutil cp -n ./result "$target"
echo "upload to $target complete."

View File

@ -1,194 +0,0 @@
name: vere
on:
workflow_call:
inputs:
upload:
description: 'upload binaries to gcp'
type: boolean
default: false
required: false
pace:
description: 'release pace'
type: string
default: 'edge'
required: false
secrets:
CACHIX_AUTH_TOKEN:
required: false
GCP_CREDENTIALS:
required: false
GCS_SERVICE_ACCOUNT_KEY:
required: false
GCS_PROJECT:
required: false
workflow_dispatch:
inputs:
upload:
description: 'upload binaries to gcp'
type: boolean
default: false
required: false
pace:
description: 'release pace'
type: choice
options:
- edge
- soon
- live
env:
UPLOAD_BASE: bootstrap.urbit.org/vere
VERE_PACE: ${{ inputs.pace }}
VERSION_TYPE: ${{ (inputs.pace == 'soon' || inputs.pace == 'live') && 'real' || 'hash' }}
jobs:
urbit:
strategy:
fail-fast: false
matrix:
include:
- { os: ubuntu-latest, type: linux }
- { os: macos-latest, type: macos }
- { os: buildjet-4vcpu-ubuntu-2204-arm, type: linux }
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
# We only want the extra nix config on linux, where it is necessary
# for the docker build. We don't want in on Mac, where it isn't but
# it breaks the nix install. The two `if` clauses should be mutually
# exclusive
- uses: cachix/install-nix-action@v16
with:
extra_nix_config: |
system-features = nixos-test benchmark big-parallel kvm
if: ${{ matrix.type == 'linux' }}
- uses: cachix/install-nix-action@v16
if: ${{ matrix.os != 'ubuntu-latest' }}
- uses: cachix/cachix-action@v10
with:
name: ares
authToken: ${{ secrets.CACHIX_AUTH_TOKEN }}
# run unit tests early on linux (x-compilation will skip them)
- name: build dynamic binary (and run tests)
if: ${{ matrix.type == 'linux' }}
run: nix-build -A urbit
- name: build static binary
run: |
nix-build -A urbit \
--arg enableStatic true \
--argstr verePace ${{ env.VERE_PACE }} > ./urbit-derivation
cat ./urbit-derivation
echo -n "urbit_static=" >> $GITHUB_ENV
cat ./urbit-derivation >> $GITHUB_ENV
cat ./urbit-derivation
- name: confirm binary is mostly static
if: matrix.type == 'macos'
run: |
bin="${{ env.urbit_static }}/bin/urbit"
if [ ! -f "$bin" ]; then
echo "no binary at $bin"
exit 1;
fi
libs="$(otool -L "${{ env.urbit_static }}/bin/urbit" | tail -n +2)"
# XX CoreFoundation?
if [ -z "$(echo "$libs" | grep -v libSystem)" ]; then
echo "it's mostly static"
echo "$libs"
exit 0
else
echo "dynamic links found:"
echo "$libs"
exit 1
fi
- name: get version string
run: |
if [ "real" == "$VERSION_TYPE" ]; then
version="$(cat ./pkg/urbit/version)"
else
version="${GITHUB_SHA:0:9}"
fi
echo -n "$version" > ./version-string
- name: upload version string artifact
if: matrix.type == 'linux'
uses: actions/upload-artifact@v3
with:
name: version-string
path: version-string
- uses: google-github-actions/auth@v1
with:
credentials_json: ${{ secrets.GCP_CREDENTIALS }}
- uses: google-github-actions/setup-gcloud@v1
if: inputs.upload
with:
project_id: ${{ secrets.GCS_PROJECT }}
- name: upload binary to bootstrap.urbit.org
if: inputs.upload
run: |
version="$(cat ./version-string)"
system="$(nix-instantiate --eval --expr 'builtins.currentSystem')"
system=${system:1:${#system}-2}
target="gs://${UPLOAD_BASE}/${VERE_PACE}/${version}/vere-v${version}-${system}"
gsutil cp -n "${{ env.urbit_static }}/bin/urbit" "$target"
exitcode=$?
test $exitcode -eq 0 &&
echo "upload to $target complete." ||
echo "upload to $target failed.";
exit $exitcode
- if: ${{ matrix.os == 'ubuntu-latest' }}
run: nix-build -A urbit-tests
- if: ${{ matrix.os == 'ubuntu-latest' }}
run: nix-build -A docker-image
after:
runs-on: ubuntu-latest
needs: [urbit]
if: inputs.upload
steps:
- uses: google-github-actions/setup-gcloud@v0.2.0
with:
version: '290.0.1'
service_account_key: ${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}
project_id: ${{ secrets.GCS_PROJECT }}
export_default_credentials: true
- name: download version-string
uses: actions/download-artifact@v3
with:
name: version-string
- name: update latest deployed version
run: |
target="gs://${UPLOAD_BASE}/${VERE_PACE}/last"
# *not* -n, as we want to overwrite the latest version-string
#
gsutil cp ./version-string "$target"
exitcode=$?
test $exitcode -eq 0 &&
echo "upload to $target complete." ||
echo "upload to $target failed.";
exit $exitcode

View File

@ -1,4 +1,6 @@
load("//bazel:common_settings.bzl", "string_flag")
load("@io_bazel_rules_docker//cc:image.bzl", "cc_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_push")
#
# OS-CPU CONFIG SETTINGS
@ -36,22 +38,6 @@ config_setting(
],
)
config_setting(
name = "openbsd_x86_64",
constraint_values = [
"@platforms//os:openbsd",
"@platforms//cpu:x86_64",
],
)
config_setting(
name = "windows_x86_64",
constraint_values = [
"@platforms//os:windows",
"@platforms//cpu:x86_64",
],
)
#
# COMPILERS
#
@ -72,6 +58,13 @@ string_flag(
visibility = ["//visibility:public"],
)
# Docker image tag.
string_flag(
name = "image_tag",
build_setting_default = "",
visibility = ["//visibility:public"],
)
#
# PLATFORMS
#
@ -144,3 +137,21 @@ alias(
name = "urbit",
actual = "//pkg/vere:urbit",
)
#
# DOCKER IMAGE
#
cc_image(
name = "docker",
binary = "//pkg/vere:urbit",
)
container_push(
name = "upload_docker",
format = "Docker",
image = ":docker",
registry = "docker.io",
repository = "tloncorp/vere-test",
tag = "//:image_tag",
)
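
For reference, the shared workflow drives this target twice, once per tag (the version tag below is illustrative):

```sh
bazel run :upload_docker --image_tag=v1.2.3
bazel run :upload_docker --image_tag=latest
```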

View File

@ -2,7 +2,7 @@
## Workflow
Before beginning any unit of work, you should have ensure you have an issue
Before beginning any unit of work, you should ensure you have a GitHub issue
detailing the scope of the work. This could be an issue someone else filed and
has been assigned to you (or you've assigned to yourself) or a new issue you
filed specifically for this unit of work. As much as possible, discussion of the
@ -32,7 +32,7 @@ Optionally add a long description.
### Pull Requests and Merges
When your work is ready for review, open up a pull request, making sure to link
When your work is ready for review, open a pull request, making sure to link
to the tracking issue in the description, which should be formatted as follows
(where `N` is the number of this work's tracking issue):
@ -63,7 +63,7 @@ request.
Although you likely have an identity on the live network, developing on the live
network is high-risk and largely unnecessary. Instead, standard practice is to
work on a fake ship. Fake ships use deterministic keys derived from the ship's
address; don't communicate on the live network; and can communicate with other
address, don't communicate on the live network, and can communicate with other
fake ships over the local loopback.
### Boot a New Fake Ship
@ -78,9 +78,9 @@ $ ./urbit -F <ship>
```
By default, booting a fake ship will use the same pre-compiled kernelspace--
a "pill"--that livenet ships use, which leads to a non-trivial boot time on the
order of tens of minutes. However, using a development specific pill--a "solid"
pill--the time to boot a new fake ship can be reduced to a few minutes.
a "pill"-- that livenet ships use, which leads to a non-trivial boot time on the
order of tens of minutes. However, using a development specific pill-- a "solid"
pill-- the time to boot a new fake ship can be reduced to a few minutes.
The solid pill (and other pills) live in the [Urbit repo][urbit]. To boot using
the solid pill, download the pill and then run:
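
The exact invocation falls outside this hunk; a minimal sketch, assuming the solid pill was downloaded to `bin/solid.pill`:

```sh
$ ./urbit -F <ship> -B bin/solid.pill
```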

View File

@ -1,6 +1,6 @@
# Official Urbit Docker Image
This is the official Docker image for [Urbit](https://urbit.org).
This is the official Docker image for [Urbit](https://urbit.org).
Urbit is a clean-slate OS and network for the 21st century.
@ -12,7 +12,7 @@ as described below.
### Volume Mount
This image expects a volume mounted at `/urbit`. This volume should initially contain one of
- A keyfile `<shipname>.key` for a galaxy, star, planet, or moon. See the setup instructions for Urbit for information on [obtaining a keyfile](https://urbit.org/using/install/).
- A keyfile `<shipname>.key` for a galaxy, star, planet, or moon. See the setup instructions for Urbit for information on [obtaining a keyfile](https://urbit.org/using/install/).
* e.g. `sampel-palnet.key` for the planet `sampel-palnet`.
- An empty file with the extension `.comet`. This will cause Urbit to boot a [comet](https://urbit.org/docs/glossary/comet/) in a pier named for the `.comet` file (less the extension).
* e.g. starting with an empty file `my-urbit-bot.comet` will result in Urbit booting a comet into the pier
@ -27,7 +27,7 @@ In consequence, it is safe to remove the container and start a new container whi
### Ports
The image includes `EXPOSE` directives for TCP port 80 and UDP port 34343. Port `80` is used for Urbit's HTTP interface for both [Landscape](https://urbit.org/docs/glossary/landscape/) and for [API calls](https://urbit.org/using/integrating-api/) to the ship. Port `34343` is set by default to be used by [Ames](https://urbit.org/docs/glossary/ames/) for ship-to-ship communication.
You can either pass the `-P` flag to docker to map ports directly to the corresponding ports on the host, or map them individually with `-p` flags. For local testing the latter is often convenient, for instance to remap port 80 to an unprivileged port.
You can either pass the `-P` flag to docker to map ports directly to the corresponding ports on the host, or map them individually with `-p` flags. For local testing the latter is often convenient, for instance to remap port 80 to an unprivileged port.
For best performance, you must map the Ames UDP port to the *same* port on the host. If you map to a different port, Ames will not be able to make direct connections and your network performance may suffer somewhat. Note that using the same port is required for direct connections but is not by itself sufficient for them. If you are behind a NAT router, the host is not on a public IP address, or you are firewalled, you may not achieve direct connections regardless.
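
Putting the volume and port guidance together, a minimal invocation might look like the following; the image name and host HTTP port are assumptions, not taken from this repo:

```sh
# Pier volume at ~/urbit; HTTP remapped to an unprivileged port; Ames UDP
# mapped to the *same* host port so direct connections remain possible.
docker run -d \
  -p 8080:80 \
  -p 34343:34343/udp \
  -v ~/urbit:/urbit \
  tloncorp/urbit:latest  # hypothetical image name
```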

View File

@ -1,7 +1,7 @@
# Building Vere
We use [`bazel`][bazel][^1] to built Vere, which is packaged as a single binary,
`urbit`. We spport the following `(host, target)` pairs, where the host platform
We use [`bazel`][bazel][^1] to build Vere, which is packaged as a single binary,
`urbit`. We support the following `(host, target)` pairs, where the host platform
is where [`bazel`][bazel] runs and the target platform is where `urbit` will
run:

View File

@ -11,85 +11,12 @@ maintainers when ready.
Master is what's released on the network. Deployment instructions are in the
next section, but tagged releases should always come from this branch.
### Feature branches
Anyone can create feature branches. For those with commit access to
urbit/urbit, you're welcome to create them in this repo; otherwise, fork the
repo and create them there.
Usually, new development should start from master, but if your work depends on
work in another feature branch or release branch, start from there.
If, after starting your work, you need changes that are in master, merge it into
your branch. If you need changes that are in a release branch or feature
branch, merge it into your branch, but understand that your work now depends on
that release branch, which means it won't be released until that one is
released.
### Release branches
Release branches are code that is ready to release. All release branch names
should start with `next/`.
`next/vere` is the release branch.
All code must be reviewed before being pushed to a release branch. Thus,
feature branches should be PR'd against a release branch, not master.
Create new release branches as needed. You don't need a new one for every PR,
since many changes are relatively small and can be merged together with little
risk. However, once you merge two branches, they're now coupled and will only
be released together -- unless one of the underlying commits is separately put
on a release branch.
Here's a worked example. The rule is to make however many branches are useful,
and no more. This example is not prescriptive; the developers making the
changes may add, remove, or rename branches in this flow at will.
Suppose you (plural, the dev community at large) complete some work in a
userspace app, and you put it in `next/landscape`. Separately, you make a small
JS change. If you PR it to `next/landscape`, then it will only be released at
the same time as the app changes. Maybe this is fine, or maybe you want this
change to go out quickly, and the change in `next/landscape` is relatively
risky, so you don't want to push it out on Friday afternoon. In this case, put
the change in another release branch, say `next/js`. Now either can be released
independently.
Suppose you do further work that you want to PR to `next/landscape`, but it
depends on your fixes in `next/js`. Simply merge `next/js` into either your
feature branch or `next/landscape` and PR your finished work to
`next/landscape`. Now there is a one-way coupling: `next/landscape` contains
`next/js`, so releasing it will implicitly release `next/js`. However, you can
still release `next/js` independently.
This scheme extends to other branches, like `next/base` or `next/os1.1` or
`next/ford-fusion`. Some branches may be long-lived and represent simply the
"next" release of something, while others will have a definite lifetime that
corresponds to development of a particular feature or numbered release.
Since they are "done", release branches should be considered "public", in the
sense that others may depend on them at will. Thus, never rebase a release
branch.
When cutting a new release, you can filter branches with `git branch --list
'next/*'` or by typing "next/" in the branch filter on Github. This will give
you the list of branches which have passed review and may be merged to master
and released. When choosing which branches to release, make sure you understand
the risks of releasing them immediately. If merging these produces nontrivial
conflicts, consider asking the developers on those branches to merge between
themselves. In many cases a developer can do this directly, but if it's
sufficiently nontrivial, this may be a reviewed PR of one release branch into
another.
#### Standard release branches
While you can always create non-standard release branches to stage for a
particular release, most changes should go through the following:
- next/base -- changes to the %base desk in pkg/arvo
- next/garden -- changes to the %garden desk
- next/landscape -- changes to the %landscape desk
- next/bitcoin -- changes to the %bitcoin desk
- next/webterm -- changes to the %webterm desk
- next/vere -- changes to the runtime
All code must be reviewed before being pushed to the release branch. Thus,
issue branches should be PR'd against a release branch, not master.
### Other cases
@ -115,147 +42,16 @@ be duplicated in the history, but it won't have any serious side effects.
Here lies an informal guide for making hotfix releases and deploying them to
the network.
Take [this PR][1], as an example. This constituted a great hotfix. It's a
single commit, targeting a problem that existed on the network at the time.
Here's how it should be released and deployed OTA.
[1]: https://github.com/urbit/urbit/pull/2025
Ideally, hotfixes should consist of a single commit, targeting a problem that
existed in the latest runtime at the time.
### If the thing is acceptable to merge, merge it to master
Unless it's very trivial, it should probably have a single "credible looking"
review from somebody else on it.
You should avoid merging the PR in GitHub directly. Instead, use the
`sh/merge-with-custom-msg` script -- it will produce a merge commit with
message along the lines of:
```
Merge branch FOO (#PR_NUM)
* FOO:
bar: ...
baz: ...
Signed-off-by: SIGNER <signer@example.com>
```
We do this as it's nice to have the commit log information in the merge commit,
which GitHub's "Merge PR" button doesn't do (at least by default).
`sh/merge-with-custom-msg` performs some useful last-minute urbit-specific
checks, as well.
You might want to alias `sh/merge-with-custom-msg` locally, to make it easier
to use. My .git/config contains the following, for example:
```
[alias]
mu = !sh/merge-with-custom-msg
```
so that I can type e.g. `git mu origin/foo 1337`.
### Prepare a release commit
If you're making a Vere release, just play it safe and update all the pills.
To produce multi pills, you will need to set up an environment with the
appropriate desks with the appropriate contents, doing something like the
following (where `> ` denotes an urbit command and `% ` denotes a unix shell
command):
```console
> |merge %garden our %base
> |merge %landscape our %base
> |merge %bitcoin our %base
> |merge %webterm our %base
> |mount %
> |mount %garden
> |mount %landscape
> |mount %bitcoin
> |mount %webterm
% rsync -avL --delete pkg/arvo/ zod/base/
% rm -rf zod/base/tests/
% for desk in garden landscape bitcoin webterm; do \
rsync -avL --delete pkg/$desk/ zod/$desk/ \
done
> |commit %base
> |commit %garden
> |commit %landscape
> |commit %bitcoin
> |commit %webterm
> .multi/pill +solid %base %garden %landscape %bitcoin %webterm
> .multi-brass/pill +brass %base %garden %landscape %bitcoin %webterm
```
And then of course:
```console
> .solid/pill +solid
> .brass/pill +brass
> .ivory/pill +ivory
```
For an Urbit OS release, after all the merge commits, make a release with the
commit message "release: urbit-os-v1.0.xx". This commit should have up-to-date
artifacts from pkg/interface and a new version number in the desk.docket-0 of
any desk which changed. If neither the pill nor the JS need to be updated (e.g
if the pill was already updated in the previous merge commit), consider making
the release commit with --allow-empty.
If anything in `pkg/interface` has changed, ensure it has been built and
deployed properly. You'll want to do this before making a pill, since you want
the pill to have the new files/hash. For most things, it is sufficient to run
`npm install; npm run build:prod` in `pkg/interface`.
However, if you've made a change to Landscape's JS, then you will need to build
a "glob" and upload it to bootstrap.urbit.org. To do this, run `npm install;
npm run build:prod` in `pkg/interface`, and add the resulting
`pkg/arvo/app/landscape/index.[hash].js` to a fakezod at that path (or just create a
new fakezod with `urbit -F zod -B bin/solid.pill -A pkg/arvo`). Run
`:glob|make`, and this will output a file in `fakezod/.urb/put/glob-0vXXX.glob`.
Upload this file to bootstrap.urbit.org, and modify `+hash` at the top of
`pkg/arvo/app/glob.hoon` to match the hash in the filename of the `.glob` file.
Amend `pkg/arvo/app/landscape/index.html` to import the hashed JS bundle, instead
of the unversioned index.js. Do not commit the produced `index.js` and
make sure it doesn't end up in your pills (they should be less than 10MB each).
### Tag the resulting commit
What you should do here depends on the type of release being made.
First, for Urbit OS releases:
If it's a very trivial hotfix that you know isn't going to break anything, tag
it as `urbit-os-vx.y`. Here 'x' is the major version and 'y' is an OTA patch
counter. Change `urbit-os` to e.g. `landscape` or another desk if that's what you're
releasing. If you're releasing changes to more than one desk, add a separate
tag for each desk (but only make one announcement email/post, with all of the
desks listed).
Use an annotated tag, i.e.
```
git tag -a urbit-os-vx.y
```
The tag format should look something like this:
```
urbit-os-vx.y
This release will be pushed to the network as an over-the-air update.
Release notes:
[..]
Contributions:
[..]
```
You can get the "contributions" section by the shortlog between the
last release and this release:
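
The command itself sits outside this hunk; a plausible sketch with placeholder tags:

```sh
git shortlog --no-merges <last-release-tag>..<this-release-tag>
```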
@ -273,29 +69,28 @@ skimming the commit descriptions (or perhaps copying some of them in verbatim).
If the commit descriptions are too poor to easily do this, then again, yell at
your fellow contributors to make them better in the future.
If it's *not* a trivial hotfix, you should probably make any number of release
candidate tags (e.g. `urbit-os-vx.y.rc1`, `urbit-os-vx.y.rc2`, ..), test
them, and after you confirm one of them is good, tag the release as
`urbit-os-vx.y`.
For Vere releases:
Tag the release as `urbit-vx.y`. The tag format should look something like
this:
```
urbit-vx.y
Note that this Vere release will by default boot fresh ships using an Urbit OS
Note that this release will by default boot fresh ships using an Urbit OS
va.b.c pill.
Release binaries:
(linux64)
https://bootstrap.urbit.org/urbit-vx.y-linux64.tgz
(linux-arm64)
https://bootstrap.urbit.org/urbit-vx.y-linux-arm64.tgz
(macOS)
https://bootstrap.urbit.org/urbit-vx.y-darwin.tgz
(linux-x86_64)
https://bootstrap.urbit.org/urbit-vx.y-linux-x86_64.tgz
(macos-arm64)
https://bootstrap.urbit.org/urbit-vx.y-macos-arm64.tgz
(macos-x86_64)
https://bootstrap.urbit.org/urbit-vx.y-macos-x86_64.tgz
Release notes:
@ -306,49 +101,15 @@ Contributions:
[..]
```
Ensure the Vere release is marked as the 'latest' release and upload the two
Ensure the release is marked as the 'latest' release and upload the four
`.tgz` files to the release as `darwin.tgz` and `linux64.tgz`;
this allows us to programmatically retrieve the latest release at
[urbit.org/install/mac/latest/](https://urbit.org/install/mac/latest) and
[urbit.org/install/linux64/latest](https://urbit.org/install/linux64/latest),
respectively.
The same spiel re: release candidates applies here.
Note that the release notes indicate which version of Urbit OS the Vere release
will use by default when booting fresh ships. Do not include implicit Urbit OS
changes in Vere releases; this used to be done, historically, but shouldn't be
any longer. If there are Urbit OS and Vere changes to be released, make two
separate releases.
### Deploy the update
(**Note**: the following steps are automated by some other Tlon-internal
tooling. Just ask `~nidsut-tomdun` for details.)
For Urbit OS updates, this means copying the files into ~zod's %base desk. The
changes should be merged into /~zod/kids and then propagated through other galaxies
and stars to the rest of the network.
For consistency, I create a release tarball and then rsync the files in.
```
$ wget https://github.com/urbit/urbit/archive/urbit-os-vx.y.tar.gz
$ tar xzf urbit-os-vx.y.tar.gz
$ herb zod -p hood -d "+hood/mount /=base="
$ rsync -zr --delete urbit-urbit-os-vx.y/pkg/arvo/ zod/base
$ herb zod -p hood -d "+hood/commit %base"
$ herb zod -p hood -d "+hood/merge %kids our %base"
```
For Vere updates, this means simply shutting down each desired ship, installing
the new binary, and restarting the pier with it.
this allows us to programmatically retrieve the latest releases at
the corresponding platform's URL: `https://urbit.org/install/{platform}/latest`.
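
A hedged example of that retrieval; the archive format behind the redirect is an assumption:

```sh
curl -L https://urbit.org/install/linux-x86_64/latest | tar xz
```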
### Announce the update
Post an announcement to urbit-dev. The tag annotation, basically, is fine here
-- I usually add the %cz hash (for Urbit OS releases) and the release binary
URLs (for Vere releases). Check the urbit-dev archives for examples of these
announcements.
-- I usually add the release binary URLs. Check the urbit-dev archives for examples
of these announcements.
Post the same announcement to the group feed of Urbit Community.

View File

@ -101,8 +101,7 @@ register_toolchains(
load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies")
# See
# https://bazelbuild.github.io/rules_foreign_cc/0.9.0/flatten.html#rules_foreign_cc_dependencies.
# See https://bazelbuild.github.io/rules_foreign_cc/0.9.0/flatten.html#rules_foreign_cc_dependencies.
rules_foreign_cc_dependencies(
register_built_tools = False,
register_default_tools = False,
@ -187,6 +186,13 @@ versioned_http_file(
version = "1.15",
)
versioned_http_file(
name = "solid_pill",
sha256 = "d737f88463f683173b5f6cbf41fc38705d4d3d67263c675d7e99841cd8485d81",
url = "https://github.com/urbit/urbit/raw/{version}/bin/solid.pill",
version = "next/vere",
)
versioned_http_archive(
name = "keccak_tiny",
build_file = "//bazel/third_party/keccak_tiny:keccak_tiny.BUILD",
@ -287,6 +293,15 @@ versioned_http_archive(
version = "1.5.1",
)
versioned_http_file(
name = "urbit",
sha256 = "403a2691dcc0cbff60157e3f91ffe15f4c8bc9dd5e1acab1d438b84cc8ef7711",
url = "https://github.com/urbit/urbit/archive/{version}.tar.gz",
# We can't use a branch name for the `version` because each new commit
# will change the SHA256 hash.
version = "39a104f872736c3b0b879d592302b86a67ad078b",
)
versioned_http_archive(
name = "uv",
build_file = "//bazel/third_party/uv:uv.BUILD",
@ -313,3 +328,61 @@ versioned_http_archive(
url = "https://www.zlib.net/zlib-{version}.tar.gz",
version = "1.2.13",
)
#
# GOLANG (DOCKER DEPENDENCY)
#
# These must be loaded before the Docker rules.
# See https://github.com/bazelbuild/rules_docker/issues/2075#issuecomment-1115954091.
versioned_http_archive(
name = "io_bazel_rules_go",
sha256 = "f2dcd210c7095febe54b804bb1cd3a58fe8435a909db2ec04e31542631cf715c",
url = "https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.zip",
version = "0.31.0",
)
versioned_http_archive(
name = "bazel_gazelle",
sha256 = "efbbba6ac1a4fd342d5122cbdfdb82aeb2cf2862e35022c752eaddffada7c3f3",
url = "https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz",
version = "0.27.0",
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
go_rules_dependencies()
go_register_toolchains(version = "1.18")
gazelle_dependencies(go_repository_default_config = "//:WORKSPACE.bazel")
#
# DOCKER
#
versioned_http_archive(
name = "io_bazel_rules_docker",
sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
url = "https://github.com/bazelbuild/rules_docker/releases/download/v{version}/rules_docker-v{version}.tar.gz",
version = "0.25.0",
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
)
container_repositories()
load("@io_bazel_rules_docker//repositories:deps.bzl", container_deps = "deps")
container_deps(
# See https://github.com/bazelbuild/rules_docker/issues/1902.
go_repository_default_config = "@//:WORKSPACE.bazel",
)
load("@io_bazel_rules_docker//cc:image.bzl", _cc_image_repos = "repositories")
_cc_image_repos()

View File

@ -14,14 +14,6 @@ cc_library(
"-fPIC",
"-c",
],
"@platforms//os:openbsd": [
"-fPIC",
"-c",
],
"@platforms//os:windows": [
"-fPIC",
"-c",
],
"//conditions:default": [],
}),
includes = ["."],

View File

@ -13,14 +13,12 @@ configure_make(
"//conditions:default": ["--jobs=`nproc`"],
}),
configure_command = select({
"@platforms//os:windows": "Configure",
"@//:linux_arm64": "Configure",
"//conditions:default": "config",
}),
configure_options = [
"no-shared",
] + select({
"@platforms//os:windows": ["mingw64"],
"@//:linux_arm64": [
"linux-aarch64",
# Native compilation on linux-arm64 isn't supported. The prefix is
@ -32,10 +30,6 @@ configure_make(
],
"//conditions:default": [],
}),
configure_prefix = select({
"@platforms//os:windows": "perl",
"//conditions:default": "",
}),
lib_source = ":all",
out_static_libs = [
"libssl.a",

View File

@ -171,6 +171,8 @@ cc_toolchain_config(
sys_includes = [
"/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include",
"/Library/Developer/CommandLineTools/usr/lib/clang/{compiler_version}/include",
"/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include",
"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/{compiler_version}/include",
],
target_cpu = "arm64",
toolchain_identifier = _macos_arm64_clang,
@ -228,6 +230,8 @@ cc_toolchain_config(
sys_includes = [
"/Library/Developer/CommandLineTools/usr/lib/clang/{compiler_version}/include",
"/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/",
"/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include",
"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/{compiler_version}/include",
],
target_cpu = "x86_64",
toolchain_identifier = _macos_x86_64_clang,
@ -260,11 +264,68 @@ toolchain(
toolchain_type = ":toolchain_type",
)
#
# brew-clang-macos-x86_64
#
# Toolchain identifier.
_macos_x86_64_brew_clang = "toolchain-brew-clang-macos-x86_64"
cc_toolchain_config(
name = "brew-clang-macos-x86_64-config",
# NOTE: building with `libtool` does not work on macOS due to lack of
# support in the `configure_make` rule provided by `rules_foreign_cc`.
# Therefore, we require setting `ar` as the archiver tool on macOS.
ar = "/usr/bin/ar",
# By default, Bazel passes the `rcsD` flags to `ar`, but macOS's `ar`
# implementation doesn't support `D`. We remove it with this attribute
# and corresponding `ar_flags_feature` in `cfg.bzl`.
# See https://github.com/bazelbuild/bazel/issues/15875.
ar_flags = "rcs",
cc = "/usr/local/opt/llvm@14/bin/clang",
compiler = "clang",
compiler_version = "//:clang_version",
ld = "/usr/bin/ld",
sys_includes = [
"/usr/local/Cellar/llvm@14/{compiler_version}/lib/clang/{compiler_version}/include",
"/Library/Developer/CommandLineTools/SDKs/MacOSX12.sdk/usr/include",
],
target_cpu = "x86_64",
toolchain_identifier = _macos_x86_64_brew_clang,
)
cc_toolchain(
name = "brew-clang-macos-x86_64",
all_files = ":empty",
compiler_files = ":empty",
dwp_files = ":empty",
linker_files = ":empty",
objcopy_files = ":empty",
strip_files = ":empty",
supports_param_files = 0,
toolchain_config = ":brew-clang-macos-x86_64-config",
toolchain_identifier = _macos_x86_64_brew_clang,
)
toolchain(
name = "brew-clang-macos-x86_64-toolchain",
exec_compatible_with = [
"@platforms//os:macos",
"@platforms//cpu:x86_64",
],
target_compatible_with = [
"@platforms//os:macos",
"@platforms//cpu:x86_64",
],
toolchain = ":brew-clang-macos-x86_64",
toolchain_type = ":toolchain_type",
)
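
As exercised by the macos-x86_64 leg of the shared workflow, selecting this toolchain looks like:

```sh
# Match the Xcode path assumed by the toolchain, then opt in to brew clang.
sudo xcode-select --switch /Library/Developer/CommandLineTools
bazel build :urbit \
  --clang_version=14.0.6 \
  --extra_toolchains=//bazel/toolchain:brew-clang-macos-x86_64-toolchain
```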
#
# BOOTSTRAPPING
#
# We can't build some artifcacts, like musl libc and its toolchain, in Bazel
# We can't build some artifacts, like musl libc and its toolchain, in Bazel
# itself (i.e. by adding a remote repository to `WORKSPACE.bazel` and a
# corresponding `BUILD` file in `bazel/third_party/<dependency>`) because doing
# so introduces a circular dependency during Bazel C/C++ toolchain resolution.
@ -281,15 +342,15 @@ genrule(
name = "install-aarch64-linux-musl-gcc",
outs = ["install-aarch64-linux-musl-gcc.sh"],
cmd = """
echo 'aarch64_linux_musl_install={}/aarch64-linux-musl' > $@
echo 'if [ ! -d $$aarch64_linux_musl_install ]; then' >> $@
echo 'aarch64_linux_musl_install={}/aarch64-linux-musl' > $@
echo 'if [ ! -d $$aarch64_linux_musl_install ]; then' >> $@
echo ' wget -c {}' >> $@
echo ' tar -xf {}.tar.gz' >> $@
echo ' archive=musl-cross-make-{}' >> $@
echo ' echo OUTPUT=$$aarch64_linux_musl_install > $$archive/config.mak' >> $@
echo ' TARGET=aarch64-linux-musl make -C$$archive -j`nproc`' >> $@
echo ' sudo TARGET=aarch64-linux-musl make -C$$archive -j`nproc` install' >> $@
echo 'fi' >> $@
echo ' tar -xf {}.tar.gz' >> $@
echo ' archive=musl-cross-make-{}' >> $@
echo ' echo OUTPUT=$$aarch64_linux_musl_install > $$archive/config.mak' >> $@
echo ' TARGET=aarch64-linux-musl make -s -C$$archive -j`nproc`' >> $@
echo ' sudo TARGET=aarch64-linux-musl make -s -C$$archive -j`nproc` install' >> $@
echo 'fi' >> $@
""".format(
_install_prefix,
_musl_cross_make_archive,
@ -297,6 +358,7 @@ genrule(
_musl_cross_make_version,
),
exec_compatible_with = ["@platforms//os:linux"],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:private"],
)
@ -304,6 +366,7 @@ sh_binary(
name = "aarch64-linux-musl-gcc",
srcs = ["install-aarch64-linux-musl-gcc"],
exec_compatible_with = ["@platforms//os:linux"],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
)
@ -311,15 +374,15 @@ genrule(
name = "install-x86_64-linux-musl-gcc",
outs = ["install-x86_64-linux-musl-gcc.sh"],
cmd = """
echo 'x86_64_linux_musl_install={}/x86_64-linux-musl' > $@
echo 'if [ ! -d $$x86_64_linux_musl_install ]; then' >> $@
echo 'x86_64_linux_musl_install={}/x86_64-linux-musl' > $@
echo 'if [ ! -d $$x86_64_linux_musl_install ]; then' >> $@
echo ' wget -c {}' >> $@
echo ' tar -xf {}.tar.gz' >> $@
echo ' archive=musl-cross-make-{}' >> $@
echo ' echo OUTPUT=$$x86_64_linux_musl_install > $$archive/config.mak' >> $@
echo ' TARGET=x86_64-linux-musl make -C$$archive -j`nproc`' >> $@
echo ' sudo TARGET=x86_64-linux-musl make -C$$archive -j`nproc` install' >> $@
echo 'fi' >> $@
echo ' tar -xf {}.tar.gz' >> $@
echo ' archive=musl-cross-make-{}' >> $@
echo ' echo OUTPUT=$$x86_64_linux_musl_install > $$archive/config.mak' >> $@
echo ' TARGET=x86_64-linux-musl make -s -C$$archive -j`nproc`' >> $@
echo ' sudo TARGET=x86_64-linux-musl make -s -C$$archive -j`nproc` install' >> $@
echo 'fi' >> $@
""".format(
_install_prefix,
_musl_cross_make_archive,
@ -327,6 +390,7 @@ genrule(
_musl_cross_make_version,
),
exec_compatible_with = ["@platforms//os:linux"],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:private"],
)
@ -334,5 +398,6 @@ sh_binary(
name = "x86_64-linux-musl-gcc",
srcs = ["install-x86_64-linux-musl-gcc"],
exec_compatible_with = ["@platforms//os:linux"],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
)
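
With the `$$` escaping stripped, the generated installer reads roughly as follows; the install prefix and musl-cross-make URL/version come from variables outside this hunk, so the values here are placeholders:

```sh
# Reconstruction of install-x86_64-linux-musl-gcc.sh (values illustrative).
x86_64_linux_musl_install=/usr/local/x86_64-linux-musl
if [ ! -d $x86_64_linux_musl_install ]; then
  wget -c $MUSL_CROSS_MAKE_URL            # placeholder archive URL
  tar -xf $MUSL_CROSS_MAKE_VERSION.tar.gz
  archive=musl-cross-make-$MUSL_CROSS_MAKE_VERSION
  echo OUTPUT=$x86_64_linux_musl_install > $archive/config.mak
  TARGET=x86_64-linux-musl make -s -C$archive -j`nproc`
  sudo TARGET=x86_64-linux-musl make -s -C$archive -j`nproc` install
fi
```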

View File

@ -17,27 +17,14 @@ cc_library(
defines = [
# We don't build on any big endian CPUs.
"U3_OS_ENDIAN_little=1",
"U3_OS_PROF=1",
] + select({
"@platforms//cpu:aarch64": ["U3_CPU_aarch64=1"],
"//conditions:default": [],
}) + select({
"@platforms//os:freebsd": [
"U3_OS_bsd=1",
],
"@platforms//os:linux": [
"U3_OS_linux=1",
"U3_OS_PROF=1",
],
"@platforms//os:macos": [
"U3_OS_osx=1",
# TODO: check if this works on M1 Macs.
"U3_OS_PROF=1",
],
"@platforms//os:openbsd": [
"U3_OS_bsd=1",
"U3_OS_no_ubc=1",
],
"@platforms//os:windows": ["U3_OS_mingw=1"],
"@platforms//os:linux": ["U3_OS_linux=1"],
"@platforms//os:macos": ["U3_OS_osx=1"],
"//conditions:default": [],
}),
include_prefix = "c3",
includes = ["."],

View File

@ -17,7 +17,6 @@ cc_library(
includes = ["."],
linkstatic = True,
local_defines = select({
"@platforms//os:windows": ["ENT_GETENTROPY_BCRYPTGENRANDOM"],
"@platforms//os:macos": ["ENT_GETENTROPY_SYSRANDOM"],
# TODO: support fallback to other options if `getrandom()` isn't
# available in `unistd.h`. Preferred order (from most preferred to least

View File

@ -36,16 +36,12 @@ cc_library(
"@gmp",
"@murmur3",
"@openssl",
"@sigsegv",
"@softfloat",
] + select({
# We don't use `libsigsegv` on Windows because of performance reasons.
"@platforms//os:windows": [],
"//conditions:default": ["@sigsegv"],
}) + select({
"@platforms//os:macos": ["//pkg/noun/platform/darwin"],
"@platforms//os:linux": ["//pkg/noun/platform/linux"],
"@platforms//os:openbsd": ["//pkg/noun/platform/openbsd"],
"@platforms//os:windows": ["//pkg/noun/platform/mingw"],
"//conditions:default": [],
}),
)

View File

@ -3,9 +3,6 @@ cc_library(
hdrs = ["rsignal.h"],
include_prefix = "platform",
includes = ["."],
target_compatible_with = select({
"@platforms//os:macos": [],
"//conditions:default": ["@platforms//:incompatible"],
}),
target_compatible_with = ["@platforms//os:macos"],
visibility = ["//pkg:__subpackages__"],
)

View File

@ -3,9 +3,6 @@ cc_library(
hdrs = ["rsignal.h"],
include_prefix = "platform",
includes = ["."],
target_compatible_with = select({
"@platforms//os:linux": [],
"//conditions:default": ["@platforms//:incompatible"],
}),
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//pkg:__subpackages__"],
)

View File

@ -1,12 +0,0 @@
cc_library(
name = "mingw",
srcs = ["rsignal.c"],
hdrs = ["rsignal.h"],
include_prefix = "platform",
includes = ["."],
target_compatible_with = select({
"@platforms//os:windows": [],
"//conditions:default": ["@platforms//:incompatible"],
}),
visibility = ["//pkg:__subpackages__"],
)

View File

@ -1,200 +0,0 @@
#include "rsignal.h"
#include <windows.h>
int
err_win_to_posix(DWORD winerr);
// The current implementation of rsignal_ is single-threaded,
// but it can be extended to multi-threaded by replacing these
// static variables with a thread id-based hash map.
//
static __p_sig_fn_t _fns[SIG_COUNT];
static volatile DWORD _tid;
static HANDLE _hvt;
void
rsignal_install_handler(int sig, __p_sig_fn_t fn)
{
if ( sig < 0 || sig >= SIG_COUNT )
return;
DWORD newtid = GetCurrentThreadId();
DWORD oldtid = InterlockedExchange(&_tid, newtid);
if ( oldtid != 0 && oldtid != newtid ) {
fprintf(stderr,
"\r\nrsignal_install_handler: %u -> %u\r\n",
oldtid,
newtid);
return;
}
__p_sig_fn_t oldfn = InterlockedExchangePointer((PVOID*)&_fns[sig], fn);
if ( fn != 0 && oldfn != 0 && oldfn != fn ) {
fprintf(stderr, "\r\nrsignal_install_handler: %p -> %p\r\n", oldfn, fn);
}
}
void
rsignal_deinstall_handler(int sig)
{
rsignal_install_handler(sig, 0);
}
void
rsignal_raise(int sig)
{
if ( sig < 0 || sig >= SIG_COUNT )
return;
__p_sig_fn_t oldfn = InterlockedExchangePointer((PVOID*)&_fns[sig], 0);
if ( oldfn == 0 )
return;
if ( _tid == GetCurrentThreadId() ) {
oldfn(sig);
return;
}
HANDLE hthread = OpenThread(THREAD_ALL_ACCESS, FALSE, _tid);
if ( !hthread ) {
fprintf(stderr,
"\r\nrsignal_raise: OpenThread(%u): %d\r\n",
_tid,
GetLastError());
return;
}
if ( SuspendThread(hthread) < 0 ) {
fprintf(stderr,
"\r\nrsignal_raise: SuspendThread(%u): %d\r\n",
_tid,
GetLastError());
goto cleanup;
}
oldfn(sig);
if ( !ResumeThread(hthread) ) {
fprintf(stderr,
"\r\nrsignal_raise: ResumeThread(%u): %d\r\n",
_tid,
GetLastError());
// abort because the main thread is stuck
abort();
}
cleanup:
CloseHandle(hthread);
}
static void
_rsignal_vt_cb(PVOID param, BOOLEAN timedOut)
{
rsignal_raise(SIGVTALRM);
}
int
rsignal_setitimer(int type, struct itimerval* in, struct itimerval* out)
{
if ( in == 0 ) {
errno = EFAULT;
return -1;
}
if ( type != ITIMER_VIRTUAL || out != 0 ) {
errno = ENOTSUP;
return -1;
}
if ( _hvt != NULL ) {
DeleteTimerQueueTimer(NULL, _hvt, NULL);
_hvt = NULL;
}
if ( timerisset(&in->it_value)
&& !CreateTimerQueueTimer(
&_hvt,
NULL,
_rsignal_vt_cb,
NULL,
in->it_value.tv_sec * 1000 + in->it_value.tv_usec / 1000,
in->it_interval.tv_sec * 1000 + in->it_interval.tv_usec / 1000,
0) )
{
errno = err_win_to_posix(GetLastError());
return -1;
}
else {
return 0;
}
}
// direct import from ntdll.dll
extern DWORD64 __imp_KiUserExceptionDispatcher;
static void
_rsignal_longjmp(intptr_t* builtin_jb)
{
__builtin_longjmp(builtin_jb, 1);
}
void
rsignal_post_longjmp(DWORD tid, intptr_t* builtin_jb)
{
HANDLE hthread = OpenThread(THREAD_ALL_ACCESS, FALSE, tid);
if ( !hthread ) {
fprintf(stderr, "\r\nrsignal: OpenThread(%u): %d\r\n", tid, GetLastError());
return;
}
CONTEXT context;
context.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
if ( !GetThreadContext(hthread, &context) ) {
fprintf(stderr,
"\r\nrsignal: GetThreadContext(%u): %d\r\n",
tid,
GetLastError());
goto cleanup;
}
// see if the thread is currently handling a structured exception
// if so, let the handler (usually the libsigsegv handler) finish
// and set up the the signal to run at the exception resume point
// otherwise, passing a parameter to fn is completely unreliable
//
DWORD64 kibase;
PRUNTIME_FUNCTION ki
= RtlLookupFunctionEntry(__imp_KiUserExceptionDispatcher, &kibase, NULL);
CONTEXT c = context;
while ( 1 ) {
DWORD64 base, frame;
PRUNTIME_FUNCTION f = RtlLookupFunctionEntry(c.Rip, &base, NULL);
if ( !f )
break;
if ( f == ki ) {
// KiUserExceptionDispatcher has a "bare" frame
// with $rsp pointing to the CONTEXT structure
//
((PCONTEXT)c.Rsp)->Rip = (DWORD64)_rsignal_longjmp;
((PCONTEXT)c.Rsp)->Rcx = (DWORD64)builtin_jb;
goto cleanup;
}
PVOID handler_data;
RtlVirtualUnwind(0, base, c.Rip, f, &c, &handler_data, &frame, NULL);
}
context.Rip = (DWORD64)_rsignal_longjmp;
context.Rcx = (DWORD64)builtin_jb;
if ( !SetThreadContext(hthread, &context) ) {
fprintf(stderr,
"\r\nrsignal: SetThreadContext(%u): %d\r\n",
tid,
GetLastError());
goto cleanup;
}
cleanup:
CloseHandle(hthread);
}

View File

@ -1,41 +0,0 @@
#ifndef NOUN_PLATFORM_MINGW_RSIGNAL_H
#define NOUN_PLATFORM_MINGW_RSIGNAL_H
typedef struct {
jmp_buf jb;
unsigned long tid;
} rsignal_jmpbuf;
#define rsignal_setjmp(buf) (buf.tid = GetCurrentThreadId(), setjmp(buf.jb))
#define rsignal_longjmp(buf, val) \
if ( buf.tid != GetCurrentThreadId() ) { \
buf.jb.retval = (val); \
rsignal_post_longjmp(buf.tid, buf.jb.buffer); \
} \
else { \
longjmp(buf.jb, val) \
}
void
rsignal_raise(int sig);
void
rsignal_install_handler(int sig, __p_sig_fn_t fn);
void
rsignal_deinstall_handler(int sig);
void
rsignal_post_longjmp(unsigned long tid, intptr_t* builtin_jb);
#define ITIMER_VIRTUAL 1
struct itimerval {
struct timeval it_value;
struct timeval it_interval;
};
int
rsignal_setitimer(int type, struct itimerval* in, struct itimerval* out);
#endif /* ifndef NOUN_PLATFORM_MINGW_RSIGNAL_H */

View File

@ -1,11 +0,0 @@
cc_library(
name = "openbsd",
hdrs = ["rsignal.h"],
include_prefix = "platform",
includes = ["."],
target_compatible_with = select({
"@platforms//os:openbsd": [],
"//conditions:default": ["@platforms//:incompatible"],
}),
visibility = ["//pkg:__subpackages__"],
)

View File

@ -1,13 +0,0 @@
/// @file
#ifndef NOUN_PLATFORM_OPENBSD_RSIGNAL_H
#define NOUN_PLATFORM_OPENBSD_RSIGNAL_H
#define rsignal_jmpbuf sigjmp_buf
#define rsignal_setjmp(buf) sigsetjmp((buf), 1)
#define rsignal_longjmp siglongjmp
#define rsignal_install_handler signal
#define rsignal_deinstall_handler(sig) signal((sig), SIG_IGN)
#define rsignal_setitimer setitimer
#endif /* ifndef NOUN_PLATFORM_OPENBSD_RSIGNAL_H */

View File

@ -141,8 +141,6 @@ cc_library(
"platform/linux/daemon.c",
"platform/linux/ptty.c",
],
"@platforms//os:openbsd": [],
"@platforms//os:windows": [],
}),
hdrs = [
"db/lmdb.h",
@ -275,3 +273,248 @@ cc_test(
"//pkg/ur",
],
)
#
# FAKE SHIP TESTS
#
genrule(
name = "boot-fake-ship",
srcs = [
"@solid_pill//file",
"@urbit//file",
],
outs = ["fakebus.zip"],
cmd = """
set -xeuo pipefail
mkdir ./urbit
tar xfz $(execpath @urbit//file) -C ./urbit --strip-components=1
$(execpath :urbit) --lite-boot --daemon --fake bus \
--bootstrap $(execpath @solid_pill//file) \
--arvo ./urbit/pkg/arvo \
./pier
cleanup() {
if [ -f ./pier/.vere.lock ]; then
kill $$(< ./pier/.vere.lock) || true
fi
set +x
}
trap cleanup EXIT
port=$$(grep loopback ./pier/.http.ports | awk -F ' ' '{print $$1}')
lensd() {
curl -s \
--data "{\\"source\\":{\\"dojo\\":\\"$$1\\"},\\"sink\\":{\\"stdout\\":null}}" \
"http://localhost:$$port" | xargs printf %s | sed 's/\\\\n/\\n/g'
}
lensa() {
curl -s \
--data "{\\"source\\":{\\"dojo\\":\\"$$2\\"},\\"sink\\":{\\"app\\":\\"$$1\\"}}" \
"http://localhost:$$port" | xargs printf %s | sed 's/\\\\n/\\n/g'
}
check() {
[ 3 -eq $$(lensd 3) ]
}
if check && sleep 10 && check; then
echo "boot success"
lensa hood "+hood/exit"
while [ -f ./pier/.vere.lock ]; do
echo "waiting for pier to shut down"
sleep 5
done
else
echo "boot failure"
kill $$(< ./pier/.vere.lock) || true
set +x
exit 1
fi
set +x
ls
ls -a ./pier
zip -q -r $@ ./pier
""",
tools = [":urbit"],
visibility = ["//visibility:public"],
)
genrule(
name = "test-fake-ship",
srcs = [
":boot-fake-ship",
"//pkg/vere:VERSION",
],
outs = ["test-fake-ship-output.zip"],
cmd = """
cp $(execpath :boot-fake-ship) pier.zip
unzip -qq pier.zip
chmod -R u+rw pier
set -x
$(execpath :urbit) --lite-boot --daemon ./pier 2> urbit-output
port=$$(grep loopback ./pier/.http.ports | awk -F ' ' '{print $$1}')
lensd() {
curl -s \
--data "{\\"source\\":{\\"dojo\\":\\"$$1\\"},\\"sink\\":{\\"stdout\\":null}}" \
"http://localhost:$$port" | xargs printf %s | sed 's/\\\\n/\\n/g'
}
lensa() {
curl -s \
--data "{\\"source\\":{\\"dojo\\":\\"$$2\\"},\\"sink\\":{\\"app\\":\\"$$1\\"}}" \
"http://localhost:$$port" | xargs printf %s | sed 's/\\\\n/\\n/g'
}
tail -F urbit-output >&2 &
tailproc=$$!
cleanup () {
kill $$(cat ./pier/.vere.lock) || true
kill "$$tailproc" 2>/dev/null || true
set +x
}
trap cleanup EXIT
# measure initial memory usage
#
lensd '~& ~ ~& %init-mass-start ~'
lensa hood '+hood/mass'
lensd '~& ~ ~& %init-mass-end ~'
# run the unit tests
#
lensd '~& ~ ~& %test-unit-start ~'
lensd '-test %/tests ~'
lensd '~& ~ ~& %test-unit-end ~'
# use the :test app to build all agents, generators, and marks
#
lensa hood '+hood/start %test'
lensd '~& ~ ~& %test-agents-start ~'
lensa test '%agents'
lensd '~& ~ ~& %test-agents-end ~'
lensd '~& ~ ~& %test-generators-start ~'
lensa test '%generators'
lensd '~& ~ ~& %test-generators-end ~'
lensd '~& ~ ~& %test-marks-start ~'
lensa test '%marks'
lensd '~& ~ ~& %test-marks-end ~'
# measure memory usage post tests
#
lensd '~& ~ ~& %test-mass-start ~'
lensa hood '+hood/mass'
lensd '~& ~ ~& %test-mass-end ~'
# defragment the loom
#
lensd '~& ~ ~& %pack-start ~'
lensa hood '+hood/pack'
lensd '~& ~ ~& %pack-end ~'
# reclaim space within arvo
#
lensd '~& ~ ~& %trim-start ~'
lensa hood '+hood/trim'
lensd '~& ~ ~& %trim-end ~'
# measure memory usage pre |meld
#
lensd '~& ~ ~& %trim-mass-start ~'
lensa hood '+hood/mass'
lensd '~& ~ ~& %trim-mass-end ~'
# globally deduplicate
#
lensd '~& ~ ~& %meld-start ~'
lensa hood '+hood/meld'
lensd '~& ~ ~& %meld-end ~'
# measure memory usage post |meld
#
lensd '~& ~ ~& %meld-mass-start ~'
lensa hood '+hood/mass'
lensd '~& ~ ~& %meld-mass-end ~'
lensa hood '+hood/exit'
cleanup
# Collect output
cp urbit-output test-output-unit
cp urbit-output test-output-agents
cp urbit-output test-output-generators
cp urbit-output test-output-marks
# TODO: when re-enabling fake ship tests on macOS, use `sed -i ''`
# instead of `sed -i`.
sed -i '0,/test-unit-start/d' test-output-unit
sed -i '/test-unit-end/,$$d' test-output-unit
sed -i '0,/test-agents-start/d' test-output-agents
sed -i '/test-agents-end/,$$d' test-output-agents
sed -i '0,/test-generators-start/d' test-output-generators
sed -i '/test-generators-end/,$$d' test-output-generators
sed -i '0,/test-marks-start/d' test-output-marks
sed -i '/test-marks-end/,$$d' test-output-marks
OUTDIR="$$(pwd)/test-fake-ship-output"
mkdir -p $$OUTDIR
cp test-output-* $$OUTDIR
set +x
hdr () {
echo =====$$(sed 's/./=/g' <<< "$$1")=====
echo ==== $$1 ====
echo =====$$(sed 's/./=/g' <<< "$$1")=====
}
for f in $$(find "$$OUTDIR" -type f); do
hdr "$$(basename $$f)"
cat "$$f"
done
fail=0
for f in $$(find "$$OUTDIR" -type f); do
if egrep "((FAILED|CRASHED)|warn:) " $$f >/dev/null; then
if [[ $$fail -eq 0 ]]; then
hdr "Test Failures"
fi
echo "ERROR Test failure in $$(basename $$f)"
((fail++))
fi
done
if [[ $$fail -eq 0 ]]; then
hdr "Success"
fi
zip -q -r $@ $$OUTDIR
exit "$$fail"
""",
tools = [":urbit"],
visibility = ["//visibility:public"],
)
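
The `lensd`/`lensa` helpers in these genrules wrap the ship's loopback HTTP "lens" interface; a minimal standalone sketch, assuming a booted pier in `./pier`:

```sh
port=$(grep loopback ./pier/.http.ports | awk '{print $1}')

# Evaluate a dojo expression and print the result (what lensd does):
curl -s --data '{"source":{"dojo":"(add 2 2)"},"sink":{"stdout":null}}' \
  "http://localhost:$port"

# Feed a dojo-built value to an app (what lensa does):
curl -s --data '{"source":{"dojo":"+hood/mass"},"sink":{"app":"hood"}}' \
  "http://localhost:$port"
```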