placement/gate/perfload-nested-runner.sh
Chris Dent 07d7749cff Implement a more complex nested-perfload topology
This changes gabbits/nested-perfload.yaml to create a tree of
providers based on one of the compute nodes in the NUMANetworkFixture
used in the functional tests. For the time being, only one type of
compute node is created (of which there will be 1000 instances).
Room is left for future expansion as requirements evolve.

The resulting hierarchy has 7 resource providers.
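
For illustration only (this is not the fixture's actual topology),
nesting is expressed through the placement API's parent_provider_uuid
field, so one parent/child pair could be created with:

    ROOT=$(uuidgen)
    CHILD=$(uuidgen)
    curl -s -H 'x-auth-token: admin' \
         -H 'openstack-api-version: placement latest' \
         -H 'content-type: application/json' \
         -X POST -d "{\"name\": \"cn-$ROOT\", \"uuid\": \"$ROOT\"}" \
         http://localhost:8000/resource_providers
    curl -s -H 'x-auth-token: admin' \
         -H 'openstack-api-version: placement latest' \
         -H 'content-type: application/json' \
         -X POST -d "{\"name\": \"numa0-$ROOT\", \"uuid\": \"$CHILD\", \"parent_provider_uuid\": \"$ROOT\"}" \
         http://localhost:8000/resource_providers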

The allocation candidates query is:

GET /allocation_candidates?
    resources=DISK_GB:10&
    required=COMPUTE_VOLUME_MULTI_ATTACH&
    resources_COMPUTE=VCPU:1,MEMORY_MB:256&
    required_COMPUTE=CUSTOM_FOO&
    resources_FPGA=FPGA:1&
    group_policy=none&
    same_subtree=_COMPUTE,_FPGA

This is a step in the right direction but does not yet exercise all
of the nested functionality. It is, however, more complex than what
came before, notably in testing 'same_subtree', which requires that
the providers satisfying the listed request group suffixes all belong
to the same subtree. We should continue to iterate to get it doing
more.

Change-Id: I67d8091b464cd7b875b37766f52818a5a2faa780
Story: 2005443
Task: 35669
2019-08-06 09:18:39 +01:00


#!/bin/bash -x
# The job working directory, passed in by the CI job; the timing log
# is preserved in its logs/ subdirectory on exit.
WORK_DIR=$1
PLACEMENT_URL="http://localhost:8000"
LOG=placement-perf.txt
LOG_DEST=${WORK_DIR}/logs
# The gabbit used to create one nested provider tree. It takes
# inputs from LOADER so that each invocation creates a unique tree.
GABBIT=gate/gabbits/nested-perfload.yaml
LOADER=gate/perfload-nested-loader.sh
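# LOADER is not shown here; as a sketch (an assumption, not the actual
# script), it is expected to hand the gabbit a unique identifier
# through the environment and replay the file with gabbi-run, roughly:
#   CN_UUID=$(uuidgen) gabbi-run "$1" -- "$2"
# with the gabbit reading the value back via gabbi's $ENVIRON
# substitution so each tree gets distinct uuids and names.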
# The query to be used to get a list of allocation candidates. If
# $GABBIT is changed, this may need to change.
TRAIT="COMPUTE_VOLUME_MULTI_ATTACH"
TRAIT1="CUSTOM_FOO"
PLACEMENT_QUERY="resources=DISK_GB:10&required=${TRAIT}&resources_COMPUTE=VCPU:1,MEMORY_MB:256&required_COMPUTE=${TRAIT1}&resources_FPGA=FPGA:1&group_policy=none&same_subtree=_COMPUTE,_FPGA"
# Number of nested trees to create.
ITERATIONS=1000
# Number of times to write an allocation and then time the query again.
ALLOCATIONS_TO_WRITE=10
# The number of providers in each nested tree. This will need to
# change whenever the resource provider topology created in $GABBIT
# is changed.
PROVIDER_TOPOLOGY_COUNT=7
# Expected total number of providers, used to check that creation
# was a success.
TOTAL_PROVIDER_COUNT=$((ITERATIONS * PROVIDER_TOPOLOGY_COUNT))
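# With the defaults above that is 1000 * 7 = 7000 providers.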
# Make sure the timing log is preserved in the job logs no matter how
# the script exits.
trap "sudo cp -p $LOG $LOG_DEST" EXIT
function time_candidates {
    (
        echo "##### TIMING GET /allocation_candidates?${PLACEMENT_QUERY} twice"
        # Run the query twice so both a cold first request and a warm
        # repeat request get timed.
        time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
        time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
    ) 2>&1 | tee -a $LOG
}
function write_allocation {
    # Take the first allocation request and send it back as a well-formed allocation.
    curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}&limit=5" \
        | jq --arg proj $(uuidgen) --arg user $(uuidgen) '.allocation_requests[0] + {consumer_generation: null, project_id: $proj, user_id: $user}' \
        | curl -s -H 'x-auth-token: admin' -H 'content-type: application/json' -H 'openstack-api-version: placement latest' \
            -X PUT -d @- "${PLACEMENT_URL}/allocations/$(uuidgen)"
}
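# For reference, the jq filter above produces a PUT body shaped
# roughly like this (uuids and amounts illustrative):
#   {
#     "allocations": {"<rp_uuid>": {"resources": {"DISK_GB": 10}}},
#     "consumer_generation": null,
#     "project_id": "<project uuid>",
#     "user_id": "<user uuid>"
#   }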
function load_candidates {
    time_candidates
    for iter in $(seq 1 $ALLOCATIONS_TO_WRITE); do
        echo "##### Writing allocation ${iter}" | tee -a $LOG
        write_allocation
        time_candidates
    done
}
function check_placement {
    local rp_count
    local code
    code=0

    python -m virtualenv -p python3 .perfload
    . .perfload/bin/activate
    # install gabbi
    pip install gabbi

    # Create $TOTAL_PROVIDER_COUNT nested resource provider trees,
    # each tree having $PROVIDER_TOPOLOGY_COUNT resource providers.
    # LOADER is called $ITERATIONS times in parallel using 50% of
    # the number of processors on the host.
    echo "##### Creating $TOTAL_PROVIDER_COUNT providers" | tee -a $LOG
    seq 1 $ITERATIONS | parallel -P 50% $LOADER $PLACEMENT_URL $GABBIT

    set +x
    # Count the created providers by counting "name" fields in the
    # full resource provider listing.
    rp_count=$(curl -H 'x-auth-token: admin' ${PLACEMENT_URL}/resource_providers | json_pp | grep -c '"name"')
    # If we failed to create the required number of rps, skip measurements and
    # log a message.
    if [[ $rp_count -ge $TOTAL_PROVIDER_COUNT ]]; then
        load_candidates
    else
        (
            echo "Unable to create expected number of resource providers. Expected: ${TOTAL_PROVIDER_COUNT}, Got: $rp_count"
            echo "See job-output.txt.gz and logs/placement-api.log for additional detail."
        ) | tee -a $LOG
        code=1
    fi
    set -x

    deactivate
    exit $code
}
check_placement
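
# Example local invocation (an assumption, not part of the gate job).
# It requires a running placement service at $PLACEMENT_URL plus curl,
# jq, uuidgen, GNU parallel, and virtualenv. The relative $GABBIT and
# $LOADER paths mean it should be run from the placement checkout
# root, and the logs directory must exist for the exit trap's copy:
#   mkdir -p /tmp/perfload/logs
#   gate/perfload-nested-runner.sh /tmp/perfload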