* Add benchmarking test suite
* Improve amortized time of model relation loads with a large number of rows
* Improve performance of loading models with many related models
* Improve performance of loading models with many related models to roughly O(N)
* Fix bug where creating N models that share a related model would build in O(N^2) time
* Lower blocking time for queryset results
* Add docstrings and streamline hash code

Co-authored-by: haydeec1 <Eric.Haydel@jhuapl.edu>
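The suite is plain pytest, so it can be exercised directly; a likely invocation, assuming the tests live under benchmarks/ and pytest-benchmark supplies the timing fixture (both assumptions, inferred from the imports below):

    pytest benchmarks/ --benchmark-only

--benchmark-only is a standard pytest-benchmark flag that skips any collected tests that do not use the benchmark fixture.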
from typing import List

import pytest

from benchmarks.conftest import Author

pytestmark = pytest.mark.asyncio


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_deleting_all(
    aio_benchmark, num_models: int, authors_in_db: List[Author]
):
    # Time a single bulk delete that clears the whole table.
    @aio_benchmark
    async def delete_all():
        await Author.objects.delete(each=True)

    delete_all()

    num = await Author.objects.count()
    assert num == 0


@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_deleting_individually(
    aio_benchmark, num_models: int, authors_in_db: List[Author]
):
    # Time one delete query per row; the parametrize sizes are much smaller
    # here because this path issues num_models round trips to the database.
    @aio_benchmark
    async def delete_one_by_one(authors: List[Author]):
        for author in authors:
            await Author.objects.filter(id=author.id).delete()

    delete_one_by_one(authors_in_db)

    num = await Author.objects.count()
    assert num == 0
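Both tests depend on fixtures imported from benchmarks/conftest.py that this file does not show: aio_benchmark, which lets pytest-benchmark's synchronous benchmark fixture time coroutines, and authors_in_db, which seeds the table with num_models rows. A minimal sketch of how such a conftest could be written, assuming pytest-benchmark and pytest-asyncio; the fixture bodies, the worker-thread approach, and the Author name field are assumptions, not the repository's actual code:

# Hypothetical benchmarks/conftest.py sketch. `Author` is assumed to be an
# ormar-style model defined in this module alongside the fixtures
# (model definition omitted here).
import asyncio
import threading
from typing import List

import pytest


@pytest.fixture
def aio_benchmark(benchmark):
    # pytest-benchmark only times synchronous callables, and the tests
    # already run inside pytest-asyncio's event loop, so each timed
    # coroutine is run to completion on a fresh loop in a worker thread.
    def decorator(coro_fn):
        def wrapper(*args, **kwargs):
            def run_sync():
                result = {}

                def target():
                    result["value"] = asyncio.run(coro_fn(*args, **kwargs))

                thread = threading.Thread(target=target)
                thread.start()
                thread.join()
                return result.get("value")

            return benchmark(run_sync)

        return wrapper

    return decorator


@pytest.fixture
async def authors_in_db(num_models: int) -> List[Author]:
    # Seed the table so each parametrized run starts from num_models rows;
    # pytest resolves num_models from the test's parametrize marker.
    authors = [Author(name=f"author-{i}") for i in range(num_models)]
    await Author.objects.bulk_create(authors)
    return authors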