Add benchmarking test suite and greatly improve performance in a few cases (#948)
* Add benchmarking test suite
* Improve amortized time of model relation loads with a large number of rows
* Improve performance of loading models with many related models
* Improve performance of loading models with many related models to O(N)-ish
* Fix bug where creating N models sharing a related model took O(N^2) time
* Lower blocking time for queryset results
* Add docstrings and streamline hash code

Co-authored-by: haydeec1 <Eric.Haydel@jhuapl.edu>
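The O(N^2) and O(N)-ish items refer to how related rows are merged into their parent models. For reference only, the generic pattern behind this kind of fix is a single dict-grouping pass instead of a per-parent scan; this is an illustrative sketch, not this PR's actual code:

# Illustrative pattern only -- not this PR's code. Merging child rows into
# parents by scanning the full row list once per parent is O(N^2); grouping
# rows by the parent key in one pass makes the merge O(N).
from collections import defaultdict
from typing import Any, Dict, List

Row = Dict[str, Any]


def group_by_parent(rows: List[Row], parent_key: str) -> Dict[Any, List[Row]]:
    grouped: Dict[Any, List[Row]] = defaultdict(list)
    for row in rows:  # single O(N) pass over all fetched rows
        grouped[row[parent_key]].append(row)
    return grouped  # each parent's children now resolve via O(1) lookups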
benchmarks/test_benchmark_aggregate.py (new file, 57 lines)

@@ -0,0 +1,57 @@
from typing import List

import pytest

from benchmarks.conftest import Author

pytestmark = pytest.mark.asyncio


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_count(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def count():
        return await Author.objects.count()

    c = count()
    assert c == len(authors_in_db)


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_avg(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def avg():
        return await Author.objects.avg("score")

    average = avg()
    assert 0 <= average <= 100


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_sum(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def sum_():
        return await Author.objects.sum("score")

    s = sum_()
    assert 0 <= s <= 100 * num_models


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_min(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def min_():
        return await Author.objects.min("score")

    m = min_()
    assert 0 <= m <= 100


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_max(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def max_():
        return await Author.objects.max("score")

    m = max_()
    assert 0 <= m <= 100
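The tests lean on two fixtures resolved from benchmarks/conftest.py: authors_in_db, which seeds num_models Author rows, and aio_benchmark, which adapts pytest-benchmark (sync-only) to coroutines. Below is a hypothetical sketch of that conftest, assuming the ORM is ormar (which the async Author.objects aggregate API suggests); the field names, database URL, and fixture internals are guesses for illustration, not the PR's actual code:

# Hypothetical sketch of benchmarks/conftest.py -- model fields, the SQLite
# URL, and fixture internals are assumptions, not the PR's code. Assumes
# ormar's pre-0.20 Meta configuration style. A session-scoped fixture that
# creates the tables and connects the database is omitted for brevity.
import asyncio
import random
import threading
from typing import List

import databases
import ormar
import pytest
import sqlalchemy

DATABASE_URL = "sqlite:///benchmark.db"  # assumed backend

database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()


class Author(ormar.Model):
    class Meta:
        database = database
        metadata = metadata
        tablename = "authors"

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    score: int = ormar.Integer()  # the tests assert aggregates fall in 0..100


@pytest.fixture
async def authors_in_db(num_models: int) -> List[Author]:
    # num_models is injected by each test's @pytest.mark.parametrize.
    authors = [
        Author(name=f"author {i}", score=random.randint(0, 100))
        for i in range(num_models)
    ]
    await Author.objects.bulk_create(authors)
    yield authors
    await Author.objects.delete(each=True)


@pytest.fixture
def aio_benchmark(benchmark):
    # pytest-benchmark only times sync callables, and the tests already run
    # inside an asyncio loop (pytestmark = pytest.mark.asyncio), so each
    # coroutine is run to completion on a fresh loop in a worker thread.
    def decorate(coro_fn):
        def run_sync(*args, **kwargs):
            box = {}

            def target():
                loop = asyncio.new_event_loop()
                try:
                    box["result"] = loop.run_until_complete(coro_fn(*args, **kwargs))
                finally:
                    loop.close()

            thread = threading.Thread(target=target)
            thread.start()
            thread.join()
            return box["result"]

        def wrapped(*args, **kwargs):
            return benchmark(run_sync, *args, **kwargs)

        return wrapped

    return decorate

With pytest-benchmark installed, running something like pytest benchmarks/ --benchmark-only would execute just these timings (--benchmark-only is a standard pytest-benchmark flag).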