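"""Tests for the aggregation QuerySet methods: min, max, sum, avg and count."""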
from typing import Optional

import ormar
import pytest
import pytest_asyncio
from ormar.exceptions import QueryDefinitionError

from tests.lifespan import init_tests
from tests.settings import create_config

base_ormar_config = create_config()


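# Test models: every Book points to an Author and carries the year and ranking
# values aggregated in the tests below.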
class Author(ormar.Model):
    ormar_config = base_ormar_config.copy(tablename="authors", order_by=["-name"])

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Book(ormar.Model):
    ormar_config = base_ormar_config.copy(
        tablename="books", order_by=["year", "-ranking"]
    )

    id: int = ormar.Integer(primary_key=True)
    author: Optional[Author] = ormar.ForeignKey(Author)
    title: str = ormar.String(max_length=100)
    year: int = ormar.Integer(nullable=True)
    ranking: int = ormar.Integer(nullable=True)


create_test_database = init_tests(base_ormar_config)


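# Remove all rows after every test so each aggregation test starts from a clean database.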
@pytest_asyncio.fixture(autouse=True, scope="function")
async def cleanup():
    yield
    async with base_ormar_config.database:
        await Book.objects.delete(each=True)
        await Author.objects.delete(each=True)


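# Seed a single author with three books: years 1920, 1930 and 1923, rankings 3, 1 and 5.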
async def sample_data():
    author = await Author(name="Author 1").save()
    await Book(title="Book 1", year=1920, ranking=3, author=author).save()
    await Book(title="Book 2", year=1930, ranking=1, author=author).save()
    await Book(title="Book 3", year=1923, ranking=5, author=author).save()


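# min() returns the smallest value of a column (or a dict when given several columns),
# follows relations via the double-underscore syntax and respects filters.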
@pytest.mark.asyncio
async def test_min_method():
    async with base_ormar_config.database:
        await sample_data()
        assert await Book.objects.min("year") == 1920
        result = await Book.objects.min(["year", "ranking"])
        assert result == dict(year=1920, ranking=1)

        assert await Book.objects.min("title") == "Book 1"

        assert await Author.objects.select_related("books").min("books__year") == 1920
        result = await Author.objects.select_related("books").min(
            ["books__year", "books__ranking"]
        )
        assert result == dict(books__year=1920, books__ranking=1)

        assert (
            await Author.objects.select_related("books")
            .filter(books__year__gt=1925)
            .min("books__year")
            == 1930
        )


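# max() mirrors min(): single column, multiple columns, related columns and filtered queries.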
@pytest.mark.asyncio
async def test_max_method():
    async with base_ormar_config.database:
        await sample_data()
        assert await Book.objects.max("year") == 1930
        result = await Book.objects.max(["year", "ranking"])
        assert result == dict(year=1930, ranking=5)

        assert await Book.objects.max("title") == "Book 3"

        assert await Author.objects.select_related("books").max("books__year") == 1930
        result = await Author.objects.select_related("books").max(
            ["books__year", "books__ranking"]
        )
        assert result == dict(books__year=1930, books__ranking=5)

        assert (
            await Author.objects.select_related("books")
            .filter(books__year__lt=1925)
            .max("books__year")
            == 1923
        )


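# sum() only accepts numeric columns (summing "title" raises QueryDefinitionError);
# years sum to 1920 + 1930 + 1923 = 5773, rankings to 3 + 1 + 5 = 9,
# and filtering to years before 1925 leaves 1920 + 1923 = 3843.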
@pytest.mark.asyncio
async def test_sum_method():
    async with base_ormar_config.database:
        await sample_data()
        assert await Book.objects.sum("year") == 5773
        result = await Book.objects.sum(["year", "ranking"])
        assert result == dict(year=5773, ranking=9)

        with pytest.raises(QueryDefinitionError):
            await Book.objects.sum("title")

        assert await Author.objects.select_related("books").sum("books__year") == 5773
        result = await Author.objects.select_related("books").sum(
            ["books__year", "books__ranking"]
        )
        assert result == dict(books__year=5773, books__ranking=9)

        assert (
            await Author.objects.select_related("books")
            .filter(books__year__lt=1925)
            .sum("books__year")
            == 3843
        )


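# avg() rejects non-numeric columns as well; 5773 / 3 rounds to 1924.33
# and (1920 + 1923) / 2 == 1921.5.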
@pytest.mark.asyncio
async def test_avg_method():
    async with base_ormar_config.database:
        await sample_data()
        assert round(float(await Book.objects.avg("year")), 2) == 1924.33
        result = await Book.objects.avg(["year", "ranking"])
        assert round(float(result.get("year")), 2) == 1924.33
        assert result.get("ranking") == 3.0

        with pytest.raises(QueryDefinitionError):
            await Book.objects.avg("title")

        result = await Author.objects.select_related("books").avg("books__year")
        assert round(float(result), 2) == 1924.33
        result = await Author.objects.select_related("books").avg(
            ["books__year", "books__ranking"]
        )
        assert round(float(result.get("books__year")), 2) == 1924.33
        assert result.get("books__ranking") == 3.0

        assert (
            await Author.objects.select_related("books")
            .filter(books__year__lt=1925)
            .avg("books__year")
            == 1921.5
        )


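# The same aggregation methods are available on a related QuerySet proxy such as author.books.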
@pytest.mark.asyncio
async def test_queryset_method():
    async with base_ormar_config.database:
        await sample_data()
        author = await Author.objects.select_related("books").get()
        assert await author.books.min("year") == 1920
        assert await author.books.max("year") == 1930
        assert await author.books.sum("ranking") == 9
        assert await author.books.avg("ranking") == 3.0
        assert await author.books.max(["year", "title"]) == dict(
            year=1930, title="Book 3"
        )


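# count() on a select_related query returns the number of distinct Author rows,
# while count(distinct=False) keeps the older behaviour of counting the joined rows.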
@pytest.mark.asyncio
async def test_count_method():
    async with base_ormar_config.database:
        await sample_data()

        count = await Author.objects.select_related("books").count()
        assert count == 1

        # The legacy functionality
        count = await Author.objects.select_related("books").count(distinct=False)
        assert count == 3