#!/bin/bash
set -e

# Resolve the directory this script lives in (symlinks resolved), so the
# defaults file can be sourced regardless of the caller's cwd.
# Quoting and '--' protect against paths with whitespace or leading dashes.
DIR=$(readlink -f -- "$(dirname -- "$0")")

. "$DIR"/run-tests.defaults.sh

# Some tests are "multiplied" by the method for accessing the repo (the one
# specified in sources.list). The user may override them with this parameter.
#
# Here we actually have much more "pseudo-methods", where the base methods are
# further modified with various details about how the connection is set up
# to be made by apt.
#
# Of all the testcases to be run, you can select only those which employ specific
# quasi-method(s) by setting APT_RUN_TEST_ONLY_IF_METHOD_MATCHES; it can be set
# to the empty string to select only the testcases not "multiplied" by methods
# (those without a repo). If unset, then the set of run tests is not affected.
# The unquoted expansion below is intentional: the user may override the
# defaults with a whitespace-separated list in a plain (scalar) environment
# variable, and word-splitting turns that list into array elements.
# NOTE(review): the unquoted expansion also glob-expands each element —
# presumably method names never contain glob characters; confirm.
readonly -a APT_TEST_METHODS=(${APT_TEST_METHODS-"${APT_TEST_ALL_METHODS[@]}"})
readonly -a APT_TEST_http_METHODS=(${APT_TEST_http_METHODS-"${APT_TEST_ALL_http_METHODS[@]}"})

# Result counters, maintained by runtest():
PASS=0   # succeeded
FAIL=0   # really failed
XFAIL=0  # failed as expected (known bug, exit code 255)
XPASS=0  # passed although a failure was expected (exit code 254)
ALL=0    # total number of testcases run so far

# Space-separated lists of the corresponding testcase names:
FAILED_TESTS=''
XFAILED_TESTS=''
XPASSED_TESTS=''

# Global parameters which affect the tests (or the framework)
# and must be set before running the tests and using the framework:

# For multiple instances to be run in parallel under xargs:
# PARALLEL_SLOT (unset if not run in parallel under xargs)

# If set, the built pkgs will be written under a filename matching our alias
# in tests (the name of the spec). (Useful, among other uses, if the usual name
# would be too long.)
APT_TEST_PKG_FILENAME_BY_ALIAS="${APT_TEST_PKG_FILENAME_BY_ALIAS-}"

# Non-empty: test failures are counted as expected failures
# (due to known bugs) rather than as real failures.
APT_TEST_XFAIL="${APT_TEST_XFAIL-}"

# Empty/unset: a temporary dir is created and cleaned up afterwards.
# Setting it explicitly is useful for debugging.
APT_TEST_WORKINGDIRECTORY="${APT_TEST_WORKINGDIRECTORY-}"

# Empty/unset: a place under the temporary dir is used.
# An explicit value makes built packages be cached between individual tests
# (for speed) and is useful for debugging afterwards.
APT_TEST_INTERMEDIATES="${APT_TEST_INTERMEDIATES-}"

# All of the above are pinned down (empty if unset) and exported for the
# test scripts and the framework:
export APT_TEST_PKG_FILENAME_BY_ALIAS APT_TEST_XFAIL \
       APT_TEST_WORKINGDIRECTORY APT_TEST_INTERMEDIATES
# Clean old stuff (those pkgs might have been built with different parameters).
if [ -n "$APT_TEST_INTERMEDIATES" ]; then
    # Bad idea if several concurrent ./run-tests use the same cache dir
    # (perhaps, a lock inside it could be an indicator whether it is used):
    #rm -rf "$APT_TEST_INTERMEDIATES"

    # So, we simply warn instead:
    # (The format string spans several source lines on purpose: the embedded
    # literal newlines are part of the message.)
    printf >&2 'WARNING: going to re-use intermediates cached in %s
(Those are built pkgs etc. if they exist. Also going to save new stuff there.)
WARNING: Clean old stuff (built with different params) yourself if needed!\n' \
	       "$APT_TEST_INTERMEDIATES"
fi

# arch parameters:

# The machine arch as detected by rpm or apt (and as expected to appear
# in the APT conf by default).  Left empty, a best-guessed value is used.
APT_TEST_DEFAULT_ARCH="${APT_TEST_DEFAULT_ARCH-}"

# Non-empty: force this custom --target value when building pkgs;
# empty: pass no --target option at all.
APT_TEST_TARGET="${APT_TEST_TARGET-}"

# A GPG key to rpm --import so that all tests run with such a db;
# empty means no GPG key is imported into the db.
APT_TEST_GPGPUBKEY="${APT_TEST_GPGPUBKEY-}"

# Compression options for the basedir can be forced by the environment.
# (Some methods don't work without some option;
# that determines which ones are chosen by default.)
APT_TEST_BASEDIR_COMPRESSION_OPTS="${APT_TEST_BASEDIR_COMPRESSION_OPTS-}"

# Additional options for genbasedir.
APT_TEST_GENBASEDIR_OPTS="${APT_TEST_GENBASEDIR_OPTS-}"

export APT_TEST_DEFAULT_ARCH APT_TEST_TARGET APT_TEST_GPGPUBKEY \
       APT_TEST_BASEDIR_COMPRESSION_OPTS APT_TEST_GENBASEDIR_OPTS

# setupenvironment()'s parameters: non-empty values override the APT config.
METHODSDIR="${METHODSDIR-}" # could override Dir::Bin::methods
export METHODSDIR

# generaterepository()'s parameters: used when non-empty; otherwise the
# function definition (in framework) supplies its own default values.
GB_REPO_LABEL="${GB_REPO_LABEL-}"
GB_REPO_DESCRIPTION="${GB_REPO_DESCRIPTION-}"
GB_REPO_ARCHIVE="${GB_REPO_ARCHIVE-}"
GB_REPO_CODENAME="${GB_REPO_CODENAME-}"
GB_REPO_ORIGIN="${GB_REPO_ORIGIN-}"
GB_REPO_SUITE="${GB_REPO_SUITE-}"
GB_REPO_VERSION="${GB_REPO_VERSION-}"
export GB_REPO_LABEL GB_REPO_DESCRIPTION GB_REPO_ARCHIVE GB_REPO_CODENAME \
       GB_REPO_ORIGIN GB_REPO_SUITE GB_REPO_VERSION

# MSGLEVEL and MSGCOLOR parameters:

# Parse command-line flags.  Iterating on $# (instead of testing "$1" for
# non-emptiness) makes an explicitly passed empty argument get warned about
# like any other unknown parameter instead of silently terminating the
# option parsing.
while [ $# -gt 0 ]; do
	case "$1" in
		-q)
			export MSGLEVEL=2
			;;
		-v)
			export MSGLEVEL=4
			;;
		--color=no)
			export MSGCOLOR='NO'
			;;
		*)
			echo >&2 "WARNING: Unknown parameter »$1« will be ignored"
			;;
	esac
	shift
done
# Default verbosity ("normal") unless already chosen above or inherited.
export MSGLEVEL="${MSGLEVEL:-3}"

export MSGCOLOR="${MSGCOLOR:-YES}"

# Colored output is only useful on a terminal: when stdout is not a tty,
# force it off even if it was requested (or defaulted) above.
if [ "$MSGCOLOR" != 'NO' ] && [ ! -t 1 ]; then
	export MSGCOLOR='NO'
fi

# ANSI escape prefixes used by the progress messages (empty when disabled).
if [ "$MSGCOLOR" = 'NO' ]; then
	CTEST=''
	CHIGH=''
	CRESET=''
else
	CTEST='\033[1;32m'
	CHIGH='\033[1;35m'
	CRESET='\033[0m'
fi

# Run one testcase and update the global counters/name lists.
# Arguments: $1 - testscript filename (executed as ./$1)
#            $2 - optional (pseudo-)method name, exported as APT_TEST_METHOD
# Globals:   reads MSGLEVEL, TOTAL, CTEST/CHIGH/CRESET;
#            updates ALL, PASS, FAIL, XFAIL, XPASS and the *_TESTS lists.
runtest() {
	local -r testscript="$1"; shift
	local method
	if [ -n "${1-}" ]; then
		local -r method="$1"; shift
	fi

	# Report name: the script name, suffixed with the method (if any).
	local -r testcase="$testscript${method:+_via_$method}"
	if [ "$MSGLEVEL" -le 2 ]; then
		printf >&2 "(%d/%d) ${CTEST}Testcase ${CHIGH}%s${CRESET}: " $((ALL+1)) "${TOTAL}" "$testcase"
	else
		printf >&2 "${CTEST}Run Testcase (%d/%d) ${CHIGH}%s${CRESET}\n" $((ALL+1)) "${TOTAL}" "$testcase"
	fi
	# Quoted "./$testscript": a name containing whitespace or glob
	# characters must not be word-split or expanded before execution.
	APT_TEST_METHOD="${method:-}" \
		"./$testscript" && PASS=$((PASS+1)) || {
			# $? is the testscript's status ('&&' did not run).
			local -r testresult=$?
			if [ "$testresult" -eq 255 ]; then
				# 255 = expected failure (known bug)
				XFAIL=$((XFAIL+1))
				XFAILED_TESTS="$XFAILED_TESTS $testcase"
				echo >&2 "$testcase ... XFAIL"
			elif [ "$testresult" -eq 254 ]; then
				# 254 = a known-buggy test unexpectedly passed
				XPASS=$((XPASS+1))
				XPASSED_TESTS="$XPASSED_TESTS $testcase"
				echo >&2 "$testcase ... XPASS"
			else
				FAIL=$((FAIL+1))
				FAILED_TESTS="$FAILED_TESTS $testcase"
				echo >&2 "$testcase ... FAIL"
			fi
	}
	ALL=$((ALL+1))
	# In quiet mode the one-line status still needs its newline.
	if [ "$MSGLEVEL" -le 2 ]; then
		echo
	fi
}

# Invoke "$cmd testscript [method]" for every selected testcase, honoring
# the APT_RUN_TEST_ONLY_IF_METHOD_MATCHES filter described at the top.
forall_testcases() {
    local -r cmd="$1"; shift

    local testscript method
    for testscript in test-*; do
	# Without nullglob an unmatched glob stays literal; skip it instead
	# of invoking $cmd on a non-existent "test-*" testcase.
	[ -e "$testscript" ] || continue
	case "$testscript" in

	    test-without-repo-*)
		# Not multiplied by methods: run only when the filter is
		# unset (the quote-removed default '' matches '') or set to
		# the empty string.
		if [[ '' == ${APT_RUN_TEST_ONLY_IF_METHOD_MATCHES-''} ]]
		then
		    "$cmd" "$testscript"
		fi
		;;

	    test-apt-method-http*)
		for method in "${APT_TEST_http_METHODS[@]}"; do
		    # Unquoted RHS on purpose: a set filter acts as a glob
		    # pattern; when unset, the quoted default "$method"
		    # matches itself literally, so the test always runs.
		    if [[ "$method" == ${APT_RUN_TEST_ONLY_IF_METHOD_MATCHES-"$method"} ]]
		    then
			"$cmd" "$testscript" "$method"
		    fi
		done
		;;

	    *)
		for method in "${APT_TEST_METHODS[@]}"; do
		    # Same filter semantics as for the http methods above.
		    if [[ "$method" == ${APT_RUN_TEST_ONLY_IF_METHOD_MATCHES-"$method"} ]]
		    then
			"$cmd" "$testscript" "$method"
		    fi
		done
		;;

	esac
    done
}

# Count the selected testcases up front so runtest() can print "(i/TOTAL)".
TOTAL=$(forall_testcases echo | wc -l)
readonly TOTAL

forall_testcases runtest

echo >&2 "Statistics: $ALL tests were run: $PASS successfully and $FAIL failed and $XFAIL xfailed and $XPASS xpassed"

# Expected failures are reported first; they never affect the exit status.
if [ -n "$XFAILED_TESTS" ]; then
	echo >&2 "XFailed tests:"
	for t in $XFAILED_TESTS; do echo "$t"; done >&2
	echo >&2
fi

# Real failures and unexpected passes get itemized; otherwise celebrate.
if [ -n "$FAILED_TESTS" ] || [ -n "$XPASSED_TESTS" ]; then
	if [ -n "$FAILED_TESTS" ]; then
		echo >&2 "Failed tests:"
		for t in $FAILED_TESTS; do echo "$t"; done >&2
		echo >&2
	fi
	if [ -n "$XPASSED_TESTS" ]; then
		echo >&2 "UneXpectedly passed tests:"
		for t in $XPASSED_TESTS; do echo "$t"; done >&2
		echo >&2
	fi
else
	echo >&2 'All tests seem to have been run successfully. What could possibly go wrong?'
fi

# Propagate the failure count as the exit status, clamped to 255 so large
# counts cannot wrap around to a misleading small (or zero) status.
if [ "$FAIL" -gt 0 ]; then
	if [ "$FAIL" -le 255 ]; then
		exit "$FAIL"
	fi
	exit 255
elif [ "$XPASS" -gt 0 ]; then
	if [ "$XPASS" -le 255 ]; then
		exit "$XPASS"
	fi
	exit 255
fi
