Add benchmarking test suite and greatly improve performance in a few cases (#948)

* Add benchmarking test suite

* Improve amortized time of model relation loads with a large number of rows

* Improve performance of loading models with many related models

* Improve performance of loading models with many related models to O(N)ish

* Fix bug where N model creation with shared related model would build in N^2 time

* Lower blocking time for queryset results

* Add docstrings and streamline hash code

Co-authored-by: haydeec1 <Eric.Haydel@jhuapl.edu>
This commit is contained in:
erichaydel
2022-12-10 11:12:11 -05:00
committed by GitHub
parent 171ef2ffaa
commit 7c18fa55e7
25 changed files with 1250 additions and 230 deletions

View File

@ -0,0 +1,27 @@
import random
import string
from typing import List
import pytest
from benchmarks.conftest import Author
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_updating_models_individually(
    aio_benchmark, num_models: int, authors_in_db: List[Author]
):
    """Benchmark updating author rows one at a time.

    The benchmarked inner coroutine issues one UPDATE per model
    instance (the slow, individual-update pattern this suite measures).
    Afterwards the test re-fetches the first author from the database
    to confirm the updates were actually persisted.

    Args:
        aio_benchmark: fixture wrapping an async callable in the
            benchmark runner (runs it repeatedly for timing).
        num_models: parametrized row count — presumably controls the
            size of ``authors_in_db`` via the fixture; confirm in conftest.
        authors_in_db: pre-created Author rows to update.
    """
    starting_first_name = authors_in_db[0].name

    @aio_benchmark
    async def update(authors: List[Author]) -> None:
        # One UPDATE statement per instance; the returned model is not
        # needed, so the result is deliberately discarded (the original
        # bound it to an unused local `a`).
        for author in authors:
            await author.update(
                name="".join(random.sample(string.ascii_letters, 5))
            )

    update(authors_in_db)

    # Re-fetch from the DB rather than trusting the in-memory object,
    # so the assertion proves the write round-tripped.
    author = await Author.objects.get(id=authors_in_db[0].id)
    assert author.name != starting_first_name