
This change duplicates the ideas started with the placement-perfload job and builds on them to create a set of nested provider trees that can be exercised. In placement-perfload, placeload is used to create the providers. That proves cumbersome for nested topologies, so this change starts a new model: using parallel [1] plus instrumented gabbi to create nested topologies in a declarative fashion.

gate/perfload-server.sh sets up the placement database and starts a uwsgi server. gate/perfload-nested-loader.sh is called in the playbook to have gabbi create the nested topology described in gate/gabbits/nested-perfload.yaml. That topology is intentionally very naive right now but should be made more realistic as we continue to develop nested features.

There is some duplication between perfload.yaml and nested-perfload.yaml that will be cleared up in a followup.

[1] https://www.gnu.org/software/parallel/ (although the version on ubuntu is a non-GPL clone)

Story: 2005443
Task: 30487
Change-Id: I617161fde5b844d7f52dc766f85c1b9f1b139e4a
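At its core the model is one line of shell (shown here as a sketch; $ITERATIONS, $LOADER, $PLACEMENT_URL and $GABBIT are the variables defined in the runner script below):

    # Each parallel invocation runs the loader once; each loader run has
    # gabbi build one nested provider tree from the declarative YAML.
    seq 1 $ITERATIONS | parallel $LOADER $PLACEMENT_URL $GABBIT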
#!/bin/bash -x
WORK_DIR=$1

PLACEMENT_URL="http://localhost:8000"
LOG=placement-perf.txt
LOG_DEST=${WORK_DIR}/logs

# The gabbit used to create one nested provider tree. It takes
# inputs from LOADER to create a unique tree.
GABBIT=gate/gabbits/nested-perfload.yaml
LOADER=gate/perfload-nested-loader.sh

# The query to be used to get a list of allocation candidates. If
# $GABBIT is changed, this may need to change.
TRAIT="COMPUTE_VOLUME_MULTI_ATTACH"
TRAIT1="HW_CPU_X86_AVX2"
PLACEMENT_QUERY="resources=DISK_GB:10&resources1=VCPU:1,MEMORY_MB:256&required=${TRAIT}&required1=${TRAIT1}&group_policy=isolate"

# Number of nested trees to create.
ITERATIONS=1000

# Number of times to write allocations and then time again.
ALLOCATIONS_TO_WRITE=10

# The number of providers in each nested tree. This will need to
# change whenever the resource provider topology created in
# $GABBIT is changed.
PROVIDER_TOPOLOGY_COUNT=3

# Expected total number of providers, used to check that creation
# was a success.
TOTAL_PROVIDER_COUNT=$((ITERATIONS * PROVIDER_TOPOLOGY_COUNT))

trap "sudo cp -p $LOG $LOG_DEST" EXIT

function time_candidates {
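    # Time the same allocation candidates query twice in a row and
    # append both timings to $LOG.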
    (
        echo "##### TIMING GET /allocation_candidates?${PLACEMENT_QUERY} twice"
        time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
        time curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}" > /dev/null
    ) 2>&1 | tee -a $LOG
}

function write_allocation {
    # Take the first allocation request and send it back as a
    # well-formed allocation.
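    # Fresh project, user and consumer uuids are generated for every
    # write so allocations never conflict with one another.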
    curl -s -H 'x-auth-token: admin' -H 'openstack-api-version: placement latest' "${PLACEMENT_URL}/allocation_candidates?${PLACEMENT_QUERY}&limit=5" \
        | jq --arg proj "$(uuidgen)" --arg user "$(uuidgen)" '.allocation_requests[0] + {consumer_generation: null, project_id: $proj, user_id: $user}' \
        | curl -s -H 'x-auth-token: admin' -H 'content-type: application/json' -H 'openstack-api-version: placement latest' \
            -X PUT -d @- "${PLACEMENT_URL}/allocations/$(uuidgen)"
}

function load_candidates {
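    # Establish a baseline timing, then alternate between writing an
    # allocation and timing again, to show how the query responds as
    # allocations accumulate.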
    time_candidates
    for iter in $(seq 1 $ALLOCATIONS_TO_WRITE); do
        echo "##### Writing allocation ${iter}" | tee -a $LOG
        write_allocation
        time_candidates
    done
}

function check_placement {
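    # Create the provider trees, confirm the expected number of
    # providers exist, and only then run the timing loop.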
    local rp_count
    local code
    code=0

    python -m virtualenv -p python3 .perfload
    . .perfload/bin/activate

    # install gabbi
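    # (gabbi provides the gabbi-run command, which $LOADER is expected
    # to invoke to build one tree from $GABBIT on each run.)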
    pip install gabbi

    # Create $TOTAL_PROVIDER_COUNT nested resource provider trees,
    # each tree having $PROVIDER_TOPOLOGY_COUNT resource providers.
    # LOADER is called $ITERATIONS times, running up to 3 jobs per
    # processor in parallel.
    echo "##### Creating $TOTAL_PROVIDER_COUNT providers" | tee -a $LOG
    seq 1 $ITERATIONS | parallel -P 300% $LOADER $PLACEMENT_URL $GABBIT

    set +x
    rp_count=$(curl -s -H 'x-auth-token: admin' ${PLACEMENT_URL}/resource_providers | json_pp | grep -c '"name"')
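    # json_pp pretty-prints the response so each provider's "name"
    # lands on its own line; grep -c therefore counts providers.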
    # Skip the timing runs and note a failure if we did not manage to
    # create the required number of rps.
    if [[ $rp_count -ge $TOTAL_PROVIDER_COUNT ]]; then
        load_candidates
    else
        (
            echo "Unable to create expected number of resource providers. Expected: ${TOTAL_PROVIDER_COUNT}, Got: $rp_count"
            echo "See job-output.txt.gz and logs/screen-placement-api.txt.gz for additional detail."
        ) | tee -a $LOG
        code=1
    fi
    set -x
    deactivate
    exit $code
}

check_placement