* WIP
* WIP - make test_model_definition tests pass
* WIP - make test_model_methods pass
* WIP - make the whole test suite at least run - failing 49/443 tests
* WIP - fix part of the getting pydantic tests, as types of fields are now kept in the core schema and not on FieldInfo
* WIP - fix validation in update by creating individual field validators, failing 36/443
* WIP - fix __pydantic_extra__ when initializing a model, fix test related to pydantic config checks, failing 32/442
* WIP - fix enum schema in model_json_schema, failing 31/442
* WIP - fix copying through model, fix setting pydantic fields on through models, fix default config and inheriting from it, failing 26/442
* WIP - fix tests checking pydantic schema, fix excluding parent fields, failing 21/442
* WIP - some missed files
* WIP - fix validators inheritance and fix validators in generated pydantic, failing 17/442
* WIP - fix through models setting - only on reverse side of relation, but always on reverse side, failing 15/442
* WIP - fix through models setting - only on reverse side of relation, but always on reverse side, failing 15/442
* WIP - working on properly populating __dict__ for relations for new schema dumping, some work on OpenAPI docs, failing 13/442
* WIP - remove property fields as pydantic now has computed_field of its own, failing 9/442
* WIP - fixes in docs, failing 8/442
* WIP - fix tests for LargeBinary schema; wrapped bytes fields fail in pydantic and will be fixed in pydantic-core; remaining is circular schema for related models, failing 6/442
* WIP - fix to pk-only models in schemas
* Getting test suites to pass (#1249)
* wip, fixing tests
* iteration, fixing some more tests
* iteration, fixing some more tests
* adhere to comments
* adhere to comments
* remove unnecessary dict call, re-add getattribute for testing
* todo for reverse relationship
* adhere to comments, remove prints
* solve circular refs
* all tests pass 🎉
* remove 3.7 from tests
* add lint and type check jobs
* reformat with ruff, fix jobs
* rename jobs
* fix imports
* fix evaluate in py3.8
* partially fix coverage
* fix coverage, add more tests
* fix test ids
* fix test ids
* fix lint, fix docs, make docs fully working scripts, add test docs job
* fix pyproject
* pin py ver in test docs
* change dir in test docs
* fix pydantic warning hack
* rm poetry call in test_docs
* switch to pathlib in test docs
* remove coverage req in test docs
* fix type check tests, fix part of types
* fix/skip next part of types
* fix next part of types
* fix next part of types
* fix coverage
* fix coverage
* fix type (bit dirty 🤷)
* fix some code smells
* change pre-commit
* tweak workflows
* remove no root from tests
* switch to full python path by passing sys.executable
* some small refactors in new base model, one sample test, change makefile
* small refactors to reduce complexity of methods
* temporarily add tests for PRs against pydantic_v2
* remove all references to __fields__
* remove all references to construct, deprecate the method and update model_construct to be in line with pydantic
* deprecate dict and add model_dump, todo: switch to model_dump in calls
* fix tests
* change to union
* change to union
* change to model_dump and model_dump_json from the deprecated dict and json methods, deprecate them in ormar too
* finish switching dict() -> model_dump()
* finish switching json() -> model_dump_json()
* fully remove pydantic_only
* switch to extra for payment card, change missed json calls
* fix coverage - no more internal warnings
* fix coverage - no more internal warnings - part 2
* split model_construct into own and pydantic parts
* split determine pydantic field type
* change to new field validators
* fix benchmarks, add codspeed instead of pytest-benchmark, add action and gh workflow
* restore pytest-benchmark
* remove codspeed
* pin pydantic version, restore codspeed
* change "on push" to pydantic_v2 to trigger the first one
* Use lifespan function instead of event (#1259)
* check return types
* fix import order, set warnings=False on json that passes the dict, fix unnecessary loop in one of the tests
* remove references to the model's Meta as it's now ormar config, rename related methods too
* filter out pydantic serializer warnings
* remove choices leftovers
* remove leftovers after property_fields, keep only enough to exclude them in initialization
* add migration guide
* fix meta references
* downgrade databases for now
* Change line numbers in documentation (#1265)
* proofread and fix the docs, part 1
* proofread and fix the docs for models
* proofread and fix the docs for fields
* proofread and fix the docs for relations
* proofread and fix the rest of the docs, add release notes for 0.20
* create tables in new docs src
* clean up old deps, uncomment docs publish on tag
* fix import reorder

---------

Co-authored-by: TouwaStar <30479449+TouwaStar@users.noreply.github.com>
Co-authored-by: Goran Mekić <meka@tilda.center>
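Several of the commits above switch calling code from the deprecated pydantic v1-style dict() and json() methods to model_dump() and model_dump_json(). A minimal sketch of what that rename looks like in practice, assuming the ormar 0.20 OrmarConfig style also mentioned in the log (the Album model and the SQLite URL are illustrative, not taken from this repository):

import databases
import sqlalchemy
import ormar

# Illustrative model in the new ormar 0.20 OrmarConfig style (an assumption
# based on the commit log above, not a model from this repo).
base_ormar_config = ormar.OrmarConfig(
    metadata=sqlalchemy.MetaData(),
    database=databases.Database("sqlite:///example.db"),
)


class Album(ormar.Model):
    ormar_config = base_ormar_config.copy(tablename="albums")

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


album = Album(name="Fandango")

# Deprecated pydantic v1-style calls that the commits above replace:
# album.dict(), album.json()

# pydantic v2 / ormar 0.20 equivalents:
data = album.model_dump()            # e.g. {"id": None, "name": "Fandango"}
payload = album.model_dump_json()    # e.g. '{"id":null,"name":"Fandango"}'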
92 lines · 2.8 KiB · Python
import random
import string

import pytest

from benchmarks.conftest import Author, Book, Publisher

pytestmark = pytest.mark.asyncio


@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_creating_models_individually(aio_benchmark, num_models: int):
    @aio_benchmark
    async def create(num_models: int):
        authors = []
        for idx in range(0, num_models):
            author = await Author.objects.create(
                name="".join(random.sample(string.ascii_letters, 5)),
                score=int(random.random() * 100),
            )
            authors.append(author)
        return authors

    authors = create(num_models)
    for author in authors:
        assert author.id is not None


@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_creating_individually_with_related_models(
    aio_benchmark, num_models: int, author: Author, publisher: Publisher
):
    @aio_benchmark
    async def create_with_related_models(
        author: Author, publisher: Publisher, num_models: int
    ):
        books = []
        for idx in range(0, num_models):
            book = await Book.objects.create(
                author=author,
                publisher=publisher,
                title="".join(random.sample(string.ascii_letters, 5)),
                year=random.randint(0, 2000),
            )
            books.append(book)

        return books

    books = create_with_related_models(
        author=author, publisher=publisher, num_models=num_models
    )

    for book in books:
        assert book.id is not None


@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_get_or_create_when_create(aio_benchmark, num_models: int):
    @aio_benchmark
    async def get_or_create(num_models: int):
        authors = []
        for idx in range(0, num_models):
            author, created = await Author.objects.get_or_create(
                name="".join(random.sample(string.ascii_letters, 5)),
                score=int(random.random() * 100),
            )
            assert created
            authors.append(author)
        return authors

    authors = get_or_create(num_models)
    for author in authors:
        assert author.id is not None


@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_update_or_create_when_create(aio_benchmark, num_models: int):
    @aio_benchmark
    async def update_or_create(num_models: int):
        authors = []
        for idx in range(0, num_models):
            author = await Author.objects.update_or_create(
                name="".join(random.sample(string.ascii_letters, 5)),
                score=int(random.random() * 100),
            )
            authors.append(author)
        return authors

    authors = update_or_create(num_models)
    for author in authors:
        assert author.id is not None
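The tests above receive an aio_benchmark fixture that lives in benchmarks/conftest.py alongside the Author, Book, and Publisher models; that conftest is not shown on this page. As a rough, hypothetical sketch of the pattern (assuming pytest-benchmark and pytest-asyncio; this is not the repo's actual fixture), such a fixture wraps an async function so a synchronous benchmark harness can time it and still return its result for the assertions that follow:

# Hypothetical aio_benchmark fixture sketch; the real one is defined in
# benchmarks/conftest.py and may differ.
import asyncio
import threading

import pytest


@pytest.fixture
def aio_benchmark(benchmark):
    def decorator(async_fn):
        def run_sync(*args, **kwargs):
            # Run the coroutine to completion on a fresh event loop in a
            # worker thread, so it does not collide with the loop that
            # pytest-asyncio already runs the test in.
            result = {}

            def runner():
                loop = asyncio.new_event_loop()
                try:
                    result["value"] = loop.run_until_complete(
                        async_fn(*args, **kwargs)
                    )
                finally:
                    loop.close()

            thread = threading.Thread(target=runner)
            thread.start()
            thread.join()
            return result["value"]

        # pytest-benchmark times run_sync and passes through its return value,
        # which is why the tests can assert on the created models afterwards.
        return lambda *args, **kwargs: benchmark(run_sync, *args, **kwargs)

    return decorator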