temper-tv/main-site/scripts/inject-s3.sh

#!/bin/bash
# This script is a cheesy way of injecting "large" files that I don't want to
# track with LFS into S3.
# > Why?
# Example: I have a huge image (think dozens of MB) that I shrink down for use
# on the site; the full-size original gets pushed to S3 instead of the repo.
set -e
usage() {
  cat << EOF
$1
Required:
  Always make sure this is run from the PROJECT_ROOT/main-site folder
Usage:
  bash scripts/inject-s3.sh my-file.ext /path/to/s3/destination
Example:
  bash scripts/inject-s3.sh ~/Downloads/big-vid.webm /video/cool-vid.webm
EOF
}
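# Guard checks: under `set -e` a failing [[ ... ]] test inside an && list does
# not abort the script on its own, so each line below only exits (via exit 1)
# when its condition actually matches.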
[[ $(basename "$PWD") != main-site ]] && usage "ERROR: Run this from the main-site directory" && exit 1
# Ensure the target local file exists
[[ ! -f "$1" ]] && usage "ERROR: Target file \"$1\" does not exist or is not a regular file" && exit 1
# Ensure we are given a destination path in S3
[[ -z "$2" ]] && usage "ERROR: No S3 destination given" && exit 1
# Check whether the destination key already exists so we don't overwrite it on accident
bucket=temper.tv
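# NOTE: S3 object keys are not rooted, so the leading "/" in the destination
# argument is just a separator in the s3:// URL; head-object needs the bare
# key, hence the "${2#/}" below.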
if aws s3api head-object --bucket "$bucket" --key "${2#/}" > /dev/null 2>&1; then
  echo "Found file at \"$2\", are you sure you want to overwrite? y/n"
  read -r resp
  if echo "$resp" | grep -qi '^y'; then
    aws s3 cp "$1" "s3://${bucket}${2}"
  else
    echo "No overwrite requested, exiting (0) now"
  fi
else
  echo "File location free, uploading now..."
  aws s3 cp "$1" "s3://${bucket}${2}"
fi
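# Optional sanity check once the upload finishes:
#   aws s3 ls "s3://${bucket}${2}"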