Create a Docker image to run the map importer in the almighty cloud. #326

Dustin Carlino 2021-05-06 14:55:51 -07:00
parent 4ac8c6e30b
commit 68f1225f22
4 changed files with 70 additions and 0 deletions

.dockerignore Normal file

@@ -0,0 +1,7 @@
# Everything is ignored, except for the following
*
!target/release/importer
!target/release/updater
!data/MANIFEST.json
!importer/config/*
!cloud/*

cloud/Dockerfile Normal file

@@ -0,0 +1,8 @@
FROM ubuntu:20.04
WORKDIR /abstreet
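# The importer and updater binaries are built on the host and copied in
# (see cloud/start_batch_import.sh); they're built without GDAL bindings, so
# there's no dynamic linking to transfer into the image.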
COPY target/release/importer ./target/release/
COPY target/release/updater ./target/release/
COPY data/MANIFEST.json ./data/
COPY importer/config ./importer/config/
COPY cloud/import_one_city.sh .

cloud/import_one_city.sh Executable file

@@ -0,0 +1,23 @@
#!/bin/bash
# This script runs inside the abst importer Docker container. It imports a
# single city, then pushes the results to a temporary subdirectory in S3.
set -e
set -x
EXPERIMENT_TAG=$1
CITY=$2
if [ "$EXPERIMENT_TAG" == "" ] || [ "$CITY" == "" ]; then
echo Missing args;
exit 1;
fi
# If we import --raw without any files, we would wind up downloading fresh OSM
# data. We want to reuse whatever's in S3, and explicitly grab fresh OSM
# through a different process.
mkdir -p data/player
echo "{\"runtime\": [], \"input\": [\"$CITY\"]}" > data/player/data.json
./target/release/updater
# TODO --scenario for some cities
./target/release/importer --raw --map --city="$CITY"
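# The S3 upload mentioned in the header comment isn't shown in this hunk. A
# hypothetical sketch of that step (the bucket path is an assumption, not
# taken from this commit):
# aws s3 sync data/ "s3://abstreet/experiments/$EXPERIMENT_TAG/$CITY/data/"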

cloud/start_batch_import.sh Executable file

@@ -0,0 +1,32 @@
#!/bin/bash
# This script packages up the importer as it exists in the current git repo,
# deploys it to AWS Batch, and regenerates maps and scenarios for all cities.
#
# This process is only runnable by Dustin, due to current S3/EC2 permissions.
#
# Run from the repo's root dir: cloud/start_batch_import.sh
set -e
set -x
EXPERIMENT_TAG=$1
if [ "$EXPERIMENT_TAG" == "" ]; then
echo Missing args;
exit 1;
fi
# It's a faster workflow to copy the local binaries into Docker, rather than
# build them inside the container. But it does require us to build the importer
# without the GDAL bindings, since the dynamic linking won't transfer over to
# the Docker image.
#
# GDAL bindings are only used when initially building popdat.bin for Seattle;
# there's almost never a need to regenerate this, and it can be done locally
# when required.
cargo build --release --bin importer --bin updater
docker build -f cloud/Dockerfile -t importer .
# To manually play around with the container: docker run -it importer /bin/bash
# TODO Upload the image to Docker Hub with a user-specified experiment tag
# TODO Kick off an AWS batch job
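# A hypothetical sketch of those TODOs (the image name, tag, and Batch job
# queue/definition are assumptions, not part of this commit):
# docker tag importer "abstreet/importer:$EXPERIMENT_TAG"
# docker push "abstreet/importer:$EXPERIMENT_TAG"
# aws batch submit-job --job-name "import-$EXPERIMENT_TAG" \
#   --job-queue importer-queue --job-definition importer-job \
#   --parameters "experimentTag=$EXPERIMENT_TAG,city=seattle"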