Merge pull request #82 from collerek/inheritance_1

Introduce inheritance
This commit is contained in:
collerek
2021-01-06 23:01:40 +07:00
committed by GitHub
119 changed files with 14846 additions and 1859 deletions


@ -130,11 +130,11 @@ album = await Album.objects.select_related("tracks").all()
assert len(album.tracks) == 3
# Fetch instances, with a filter across an FK relationship.
- tracks = Track.objects.filter(album__name="Fantasies")
+ tracks = await Track.objects.filter(album__name="Fantasies").all()
assert len(tracks) == 2
# Fetch instances, with a filter and operator across an FK relationship.
- tracks = Track.objects.filter(album__name__iexact="fantasies")
+ tracks = await Track.objects.filter(album__name__iexact="fantasies").all()
assert len(tracks) == 2
# Limit a query # Limit a query
@ -149,6 +149,7 @@ assert len(tracks) == 1
* `create(**kwargs): -> Model`
* `get(**kwargs): -> Model`
* `get_or_create(**kwargs) -> Model`
+ * `first(): -> Model`
* `update(each: bool = False, **kwargs) -> int`
* `update_or_create(**kwargs) -> Model`
* `bulk_create(objects: List[Model]) -> None`

docs/api/exceptions.md

@ -0,0 +1,89 @@
<a name="exceptions"></a>
# exceptions
Gathers all exceptions thrown by ormar.
<a name="exceptions.AsyncOrmException"></a>
## AsyncOrmException Objects
```python
class AsyncOrmException(Exception)
```
Base ormar Exception
<a name="exceptions.ModelDefinitionError"></a>
## ModelDefinitionError Objects
```python
class ModelDefinitionError(AsyncOrmException)
```
Raised for errors related to the model definition itself:
* setting @property_field on method with arguments other than func(self)
* defining a Field without required parameters
* defining a model with more than one primary_key
* defining a model without primary_key
* setting primary_key column as pydantic_only
<a name="exceptions.ModelError"></a>
## ModelError Objects
```python
class ModelError(AsyncOrmException)
```
Raised for initialization of model with non-existing field keyword.
<a name="exceptions.NoMatch"></a>
## NoMatch Objects
```python
class NoMatch(AsyncOrmException)
```
Raised for database queries that have no matching result (empty result).
<a name="exceptions.MultipleMatches"></a>
## MultipleMatches Objects
```python
class MultipleMatches(AsyncOrmException)
```
Raised for database queries that should return one row (i.e. get, first etc.)
but have multiple matching results in response.
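A minimal usage sketch of catching these (the `Track` model and its `name` field are illustrative assumptions; a connected database is assumed):
```python
from ormar.exceptions import MultipleMatches, NoMatch

try:
    track = await Track.objects.get(name="The Bird")
except NoMatch:
    # the query returned an empty result
    track = None
except MultipleMatches:
    # the query returned more than one row where exactly one was expected
    raise
```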
<a name="exceptions.QueryDefinitionError"></a>
## QueryDefinitionError Objects
```python
class QueryDefinitionError(AsyncOrmException)
```
Raised for errors in query definition:
* using contains or icontains filter with an instance of the Model
* using Queryset.update() without a filter and without setting the `each` flag to True
* using Queryset.delete() without a filter and without setting the `each` flag to True
<a name="exceptions.ModelPersistenceError"></a>
## ModelPersistenceError Objects
```python
class ModelPersistenceError(AsyncOrmException)
```
Raised on update of a model without primary_key set (cannot retrieve it from db)
or on save of a model with a relation to an unsaved model (cannot extract the fk value).
<a name="exceptions.SignalDefinitionError"></a>
## SignalDefinitionError Objects
```python
class SignalDefinitionError(AsyncOrmException)
```
Raised when a non-callable receiver is passed as a signal callback.


@ -0,0 +1,238 @@
<a name="fields.base"></a>
# fields.base
<a name="fields.base.BaseField"></a>
## BaseField Objects
```python
class BaseField(FieldInfo)
```
BaseField serves as a parent class for all basic Fields in ormar.
It keeps all common parameters available for all fields as well as
a set of useful functions.
All values are kept as class variables; ormar Fields are never instantiated.
Subclasses pydantic.FieldInfo to keep the fields related
to pydantic field types like ConstrainedStr.
<a name="fields.base.BaseField.is_valid_uni_relation"></a>
#### is\_valid\_uni\_relation
```python
| @classmethod
| is_valid_uni_relation(cls) -> bool
```
Checks if the field is a relation definition, but only for a ForeignKey relation,
so it excludes ManyToMany fields as well as virtual ForeignKeys
(the second side of an FK relation).
Used to determine if a field is a db ForeignKey column that
should be saved/populated when dealing with internal/own
Model columns only.
**Returns**:
`(bool)`: result of the check
<a name="fields.base.BaseField.get_alias"></a>
#### get\_alias
```python
| @classmethod
| get_alias(cls) -> str
```
Used to translate Model column names to database column names during db queries.
**Returns**:
`(str)`: returns custom database column name if defined by user,
otherwise field name in ormar/pydantic
<a name="fields.base.BaseField.is_valid_field_info_field"></a>
#### is\_valid\_field\_info\_field
```python
| @classmethod
| is_valid_field_info_field(cls, field_name: str) -> bool
```
Checks if field belongs to pydantic FieldInfo
- used during setting default pydantic values.
Excludes defaults and alias as they are populated separately
(defaults) or not at all (alias)
**Arguments**:
- `field_name (str)`: field name of BaseField
**Returns**:
`(bool)`: True if field is present on pydantic.FieldInfo
<a name="fields.base.BaseField.convert_to_pydantic_field_info"></a>
#### convert\_to\_pydantic\_field\_info
```python
| @classmethod
| convert_to_pydantic_field_info(cls, allow_null: bool = False) -> FieldInfo
```
Converts a BaseField into pydantic.FieldInfo
that is later easily processed by pydantic.
Used in an ormar Model Metaclass.
**Arguments**:
- `allow_null (bool)`: flag if the default value can be None
or if it should be populated by pydantic Undefined
**Returns**:
`(pydantic.FieldInfo)`: actual instance of pydantic.FieldInfo with all needed fields populated
<a name="fields.base.BaseField.default_value"></a>
#### default\_value
```python
| @classmethod
| default_value(cls, use_server: bool = False) -> Optional[FieldInfo]
```
Returns a FieldInfo instance with populated default
(static) or default_factory (function).
If the field is an autoincrement primary key the default is None.
Otherwise the field has to have either default or default_factory populated.
If all default conditions fail, None is returned.
Used in converting to pydantic FieldInfo.
**Arguments**:
- `use_server (bool)`: flag marking if server_default should be
treated as default value, default False
**Returns**:
`(Optional[pydantic.FieldInfo])`: returns a call to pydantic.Field
which is returning a FieldInfo instance
<a name="fields.base.BaseField.get_default"></a>
#### get\_default
```python
| @classmethod
| get_default(cls, use_server: bool = False) -> Any
```
Returns the default value for a field.
If the default is a Callable, it is called and the actual result is returned.
Used to populate default_values for the pydantic Model in the ormar Model Metaclass.
**Arguments**:
- `use_server (bool)`: flag marking if server_default should be
treated as default value, default False
**Returns**:
`(Any)`: default value for the field if set, otherwise implicit None
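A sketch of how defaults may be declared on ormar fields (the model, field names and sqlite URL below are illustrative assumptions, not part of ormar itself):
```python
import datetime

import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Report(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    # autoincrement primary key -> the default resolves to None
    id: int = ormar.Integer(primary_key=True)
    # static default used both by pydantic and on insert
    status: str = ormar.String(max_length=20, default="draft")
    # server-side default, calculated by the database on insert
    created: datetime.datetime = ormar.DateTime(server_default=sqlalchemy.func.now())
```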
<a name="fields.base.BaseField.has_default"></a>
#### has\_default
```python
| @classmethod
| has_default(cls, use_server: bool = True) -> bool
```
Checks if the field has default value set.
**Arguments**:
- `use_server (bool)`: flag marking if server_default should be
treated as default value, default True
**Returns**:
`(bool)`: result of the check if default value is set
<a name="fields.base.BaseField.is_auto_primary_key"></a>
#### is\_auto\_primary\_key
```python
| @classmethod
| is_auto_primary_key(cls) -> bool
```
Checks first if the field is a primary key and, if it is,
then checks if it's set to autoincrement.
An autoincrement primary_key is nullable/optional.
**Returns**:
`(bool)`: result of the check for primary key and autoincrement
<a name="fields.base.BaseField.construct_constraints"></a>
#### construct\_constraints
```python
| @classmethod
| construct_constraints(cls) -> List
```
Converts the list of ormar constraints into sqlalchemy ForeignKeys.
Has to be done dynamically as sqlalchemy binds the ForeignKey to the table,
and we need a new ForeignKey for subclasses of the current model.
**Returns**:
`(List[sqlalchemy.schema.ForeignKey])`: List of sqlalchemy foreign keys - by default one.
<a name="fields.base.BaseField.get_column"></a>
#### get\_column
```python
| @classmethod
| get_column(cls, name: str) -> sqlalchemy.Column
```
Returns definition of sqlalchemy.Column used in creation of sqlalchemy.Table.
Populates name, column type and constraints, as well as a number of parameters like
primary_key, index, unique, nullable, default and server_default.
**Arguments**:
- `name (str)`: name of the db column - used if alias is not set
**Returns**:
`(sqlalchemy.Column)`: actual definition of the database column as sqlalchemy requires.
<a name="fields.base.BaseField.expand_relationship"></a>
#### expand\_relationship
```python
| @classmethod
| expand_relationship(cls, value: Any, child: Union["Model", "NewBaseModel"], to_register: bool = True, relation_name: str = None) -> Any
```
Function overwritten for relations; in a basic field the value is returned as is.
For relations the child model is first constructed (if needed),
registered in the relation and returned.
For relation fields the value can be a pk value (any type of field),
a dict (from a Model) or an actual instance/list of a "Model".
**Arguments**:
- `value (Any)`: a Model field value, returned untouched for non relation fields.
- `child (Union["Model", "NewBaseModel"])`: a child Model to register
- `to_register (bool)`: flag if the relation should be set in RelationshipManager
**Returns**:
`(Any)`: returns untouched value for normal fields, expands only for relations


@ -0,0 +1,28 @@
<a name="decorators.property_field"></a>
# decorators.property\_field
<a name="decorators.property_field.property_field"></a>
#### property\_field
```python
property_field(func: Callable) -> Union[property, Callable]
```
Decorator to set a property-like function on a Model to be exposed
as a field in dict() and fastapi responses.
Although you can decorate a @property field like this and it will work,
mypy validation will complain about it.
Note that "fields" exposed like this do not go through validation.
**Raises**:
- `ModelDefinitionError`: if method has any other argument than self.
**Arguments**:
- `func (Callable)`: decorated function to be exposed
**Returns**:
`(Union[property, Callable])`: decorated function passed in func param, with set __property_field__ = True
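A minimal usage sketch (the `Course` model, the sqlite URL and the method name are illustrative assumptions; the package-level `ormar.property_field` export is assumed):
```python
import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Course(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)

    @ormar.property_field
    def display_name(self) -> str:
        # exposed in dict() and fastapi responses, but never stored in the db
        return f"Course: {self.name}"
```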


@ -0,0 +1,246 @@
<a name="fields.foreign_key"></a>
# fields.foreign\_key
<a name="fields.foreign_key.create_dummy_instance"></a>
#### create\_dummy\_instance
```python
create_dummy_instance(fk: Type["Model"], pk: Any = None) -> "Model"
```
Ormar never returns raw data.
So if you have a related field that has a value populated,
it will construct a Model instance out of it for you.
Creates a "fake" instance of the passed Model from the pk value.
The instantiated Model has only the pk value filled.
To achieve this the __pk_only__ flag has to be passed, as it skips validation.
If the nested related Models are required they are set with -1 as pk value.
**Arguments**:
- `fk (Model class)`: class of the related Model to which instance should be constructed
- `pk (Any)`: value of the primary_key column
**Returns**:
`(Model)`: Model instance populated with only pk
<a name="fields.foreign_key.create_dummy_model"></a>
#### create\_dummy\_model
```python
create_dummy_model(base_model: Type["Model"], pk_field: Type[Union[BaseField, "ForeignKeyField", "ManyToManyField"]]) -> Type["BaseModel"]
```
Used to construct a dummy pydantic model for type hints and pydantic validation.
Populates only the pk field and sets it to the desired type.
**Arguments**:
- `base_model (Model class)`: class of target dummy model
- `pk_field (Type[Union[BaseField, "ForeignKeyField", "ManyToManyField"]])`: ormar Field to be set on pydantic Model
**Returns**:
`(pydantic.BaseModel)`: constructed dummy model
<a name="fields.foreign_key.UniqueColumns"></a>
## UniqueColumns Objects
```python
class UniqueColumns(UniqueConstraint)
```
Subclass of sqlalchemy.UniqueConstraint.
Used so the user does not have to import anything from sqlalchemy.
<a name="fields.foreign_key.ForeignKeyConstraint"></a>
## ForeignKeyConstraint Objects
```python
@dataclass
class ForeignKeyConstraint()
```
Internal container to store ForeignKey definitions used later
to produce sqlalchemy.ForeignKeys
<a name="fields.foreign_key.ForeignKey"></a>
#### ForeignKey
```python
ForeignKey(to: Type["Model"], *, name: str = None, unique: bool = False, nullable: bool = True, related_name: str = None, virtual: bool = False, onupdate: str = None, ondelete: str = None, **kwargs: Any, ,) -> Any
```
Despite its name, it's a function that returns a constructed ForeignKeyField.
This function is actually used in model declaration (as ormar.ForeignKey(ToModel)).
Accepts a number of relation setting parameters as well as all BaseField ones.
**Arguments**:
- `to (Model class)`: target related ormar Model
- `name (str)`: name of the database field - later called alias
- `unique (bool)`: parameter passed to sqlalchemy.ForeignKey, unique flag
- `nullable (bool)`: marks field as optional/required
- `related_name (str)`: name of the reversed FK relation populated for you on the `to` model
- `virtual (bool)`: marks if the relation is virtual.
It is used for reversed FK and auto-generated FK on the through model in Many2Many relations.
- `onupdate (str)`: parameter passed to sqlalchemy.ForeignKey.
How to treat child rows on update of parent (the one where FK is defined) model.
- `ondelete (str)`: parameter passed to sqlalchemy.ForeignKey.
How to treat child rows on delete of parent (the one where FK is defined) model.
- `kwargs (Any)`: all other args to be populated by BaseField
**Returns**:
`(ForeignKeyField)`: ormar ForeignKeyField with relation to selected model
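A minimal declaration sketch (the models and the sqlite URL are illustrative assumptions):
```python
from typing import Optional

import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Album(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Track(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    # FK column in the db; the reverse side is exposed on Album as `tracks`
    album: Optional[Album] = ormar.ForeignKey(Album, related_name="tracks")
    title: str = ormar.String(max_length=100)
```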
<a name="fields.foreign_key.ForeignKeyField"></a>
## ForeignKeyField Objects
```python
class ForeignKeyField(BaseField)
```
Actual class returned from ForeignKey function call and stored in model_fields.
<a name="fields.foreign_key.ForeignKeyField._extract_model_from_sequence"></a>
#### \_extract\_model\_from\_sequence
```python
| @classmethod
| _extract_model_from_sequence(cls, value: List, child: "Model", to_register: bool, relation_name: str) -> List["Model"]
```
Takes a list of Models and registers them on the parent.
Registration is mutual, so the children also have a reference to the parent.
Used in reverse FK relations.
**Arguments**:
- `value (List)`: list of Model
- `child (Model)`: child/ related Model
- `to_register (bool)`: flag if the relation should be set in RelationshipManager
**Returns**:
`(List["Model"])`: list (if needed) registered Models
<a name="fields.foreign_key.ForeignKeyField._register_existing_model"></a>
#### \_register\_existing\_model
```python
| @classmethod
| _register_existing_model(cls, value: "Model", child: "Model", to_register: bool, relation_name: str) -> "Model"
```
Takes an already created instance and registers it on the parent.
Registration is mutual, so the children also have a reference to the parent.
Used in reverse FK relations and normal FK for single models.
**Arguments**:
- `value (Model)`: already instantiated Model
- `child (Model)`: child/ related Model
- `to_register (bool)`: flag if the relation should be set in RelationshipManager
**Returns**:
`(Model)`: (if needed) registered Model
<a name="fields.foreign_key.ForeignKeyField._construct_model_from_dict"></a>
#### \_construct\_model\_from\_dict
```python
| @classmethod
| _construct_model_from_dict(cls, value: dict, child: "Model", to_register: bool, relation_name: str) -> "Model"
```
Takes a dictionary, creates an instance and registers it on the parent.
If the dictionary contains only one field and it's the pk, it is a __pk_only__ model.
Registration is mutual, so the children also have a reference to the parent.
Used in normal FK for dictionaries.
**Arguments**:
- `value (dict)`: dictionary of a Model
- `child (Model)`: child/ related Model
- `to_register (bool)`: flag if the relation should be set in RelationshipManager
**Returns**:
`(Model)`: (if needed) registered Model
<a name="fields.foreign_key.ForeignKeyField._construct_model_from_pk"></a>
#### \_construct\_model\_from\_pk
```python
| @classmethod
| _construct_model_from_pk(cls, value: Any, child: "Model", to_register: bool, relation_name: str) -> "Model"
```
Takes a pk value, creates a dummy instance and registers it on the parent.
Registration is mutual, so the children also have a reference to the parent.
Used in normal FK when a pk value is passed.
**Arguments**:
- `value (Any)`: value of a related pk / fk column
- `child (Model)`: child/ related Model
- `to_register (bool)`: flag if the relation should be set in RelationshipManager
**Returns**:
`(Model)`: (if needed) registered Model
<a name="fields.foreign_key.ForeignKeyField.register_relation"></a>
#### register\_relation
```python
| @classmethod
| register_relation(cls, model: "Model", child: "Model", relation_name: str) -> None
```
Registers the relation between parent and child in the relation manager.
The relation manager is kept on each model (a different instance per model).
Used in the Metaclass, and sometimes some relations are missing
(i.e. cloned Models in fastapi might miss one).
**Arguments**:
- `model (Model class)`: parent model (with relation definition)
- `child (Model class)`: child model
<a name="fields.foreign_key.ForeignKeyField.expand_relationship"></a>
#### expand\_relationship
```python
| @classmethod
| expand_relationship(cls, value: Any, child: Union["Model", "NewBaseModel"], to_register: bool = True, relation_name: str = None) -> Optional[Union["Model", List["Model"]]]
```
For relations the child model is first constructed (if needed),
registered in relation and returned.
For relation fields the value can be a pk value (any type of field),
a dict (from a Model) or an actual instance/list of a "Model".
Selects the appropriate constructor based on the passed value.
**Arguments**:
- `value (Any)`: a Model field value, returned untouched for non relation fields.
- `child (Union["Model", "NewBaseModel"])`: a child Model to register
- `to_register (bool)`: flag if the relation should be set in RelationshipManager
**Returns**:
`(Optional[Union["Model", List["Model"]]])`: returns a Model or a list of Models


@ -0,0 +1,53 @@
<a name="fields.many_to_many"></a>
# fields.many\_to\_many
<a name="fields.many_to_many.ManyToMany"></a>
#### ManyToMany
```python
ManyToMany(to: Type["Model"], through: Type["Model"], *, name: str = None, unique: bool = False, virtual: bool = False, **kwargs: Any) -> Any
```
Despite its name, it's a function that returns a constructed ManyToManyField.
This function is actually used in model declaration
(as ormar.ManyToMany(ToModel, through=ThroughModel)).
Accepts a number of relation setting parameters as well as all BaseField ones.
**Arguments**:
- `to (Model class)`: target related ormar Model
- `through (Model class)`: through model for m2m relation
- `name (str)`: name of the database field - later called alias
- `unique (bool)`: parameter passed to sqlalchemy.ForeignKey, unique flag
- `virtual (bool)`: marks if the relation is virtual.
It is used for reversed FK and auto-generated FK on the through model in Many2Many relations.
- `kwargs (Any)`: all other args to be populated by BaseField
**Returns**:
`(ManyToManyField)`: ormar ManyToManyField with m2m relation to selected model
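A minimal declaration sketch with an explicit through model (the names and the sqlite URL are illustrative assumptions):
```python
import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Category(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=40)


class PostCategory(ormar.Model):
    # empty through model - a mandatory id pk is added for you
    class Meta:
        database = database
        metadata = metadata


class Post(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=200)
    categories = ormar.ManyToMany(Category, through=PostCategory)
```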
<a name="fields.many_to_many.ManyToManyField"></a>
## ManyToManyField Objects
```python
class ManyToManyField(ForeignKeyField, ormar.QuerySetProtocol, ormar.RelationProtocol)
```
Actual class returned from ManyToMany function call and stored in model_fields.
<a name="fields.many_to_many.ManyToManyField.default_target_field_name"></a>
#### default\_target\_field\_name
```python
| @classmethod
| default_target_field_name(cls) -> str
```
Returns default target model name on through model.
**Returns**:
`(str)`: name of the field


@ -0,0 +1,419 @@
<a name="fields.model_fields"></a>
# fields.model\_fields
<a name="fields.model_fields.is_field_nullable"></a>
#### is\_field\_nullable
```python
is_field_nullable(nullable: Optional[bool], default: Any, server_default: Any, pydantic_only: Optional[bool]) -> bool
```
Checks if the given field should be nullable/optional based on the given parameters.
**Arguments**:
- `nullable (Optional[bool])`: flag explicit setting a column as nullable
- `default (Any)`: value or function to be called as default in python
- `server_default (Any)`: function to be called as default by sql server
- `pydantic_only (Optional[bool])`: flag if fields should not be included in the sql table
**Returns**:
`(bool)`: result of the check
<a name="fields.model_fields.is_auto_primary_key"></a>
#### is\_auto\_primary\_key
```python
is_auto_primary_key(primary_key: bool, autoincrement: bool) -> bool
```
Checks if field is an autoincrement pk -> if yes it's optional.
**Arguments**:
- `primary_key (bool)`: flag if field is a pk field
- `autoincrement (bool)`: flag if field should be autoincrement
**Returns**:
`(bool)`: result of the check
<a name="fields.model_fields.ModelFieldFactory"></a>
## ModelFieldFactory Objects
```python
class ModelFieldFactory()
```
Default field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.ModelFieldFactory.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.ModelFieldFactory.validate"></a>
#### validate
```python
| @classmethod
| validate(cls, **kwargs: Any) -> None
```
Used to validate if all required parameters on a given field type are set.
**Arguments**:
- `kwargs (Any)`: all params passed during construction
<a name="fields.model_fields.String"></a>
## String Objects
```python
class String(ModelFieldFactory, str)
```
String field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.String.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.String.validate"></a>
#### validate
```python
| @classmethod
| validate(cls, **kwargs: Any) -> None
```
Used to validate if all required parameters on a given field type are set.
**Arguments**:
- `kwargs (Any)`: all params passed during construction
<a name="fields.model_fields.Integer"></a>
## Integer Objects
```python
class Integer(ModelFieldFactory, int)
```
Integer field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.Integer.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.Text"></a>
## Text Objects
```python
class Text(ModelFieldFactory, str)
```
Text field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.Text.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.Float"></a>
## Float Objects
```python
class Float(ModelFieldFactory, float)
```
Float field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.Float.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.DateTime"></a>
## DateTime Objects
```python
class DateTime(ModelFieldFactory, datetime.datetime)
```
DateTime field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.DateTime.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.Date"></a>
## Date Objects
```python
class Date(ModelFieldFactory, datetime.date)
```
Date field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.Date.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.Time"></a>
## Time Objects
```python
class Time(ModelFieldFactory, datetime.time)
```
Time field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.Time.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.JSON"></a>
## JSON Objects
```python
class JSON(ModelFieldFactory, pydantic.Json)
```
JSON field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.JSON.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.BigInteger"></a>
## BigInteger Objects
```python
class BigInteger(Integer, int)
```
BigInteger field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.BigInteger.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.Decimal"></a>
## Decimal Objects
```python
class Decimal(ModelFieldFactory, decimal.Decimal)
```
Decimal field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.Decimal.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options
<a name="fields.model_fields.Decimal.validate"></a>
#### validate
```python
| @classmethod
| validate(cls, **kwargs: Any) -> None
```
Used to validate if all required parameters on a given field type are set.
**Arguments**:
- `kwargs (Any)`: all params passed during construction
<a name="fields.model_fields.UUID"></a>
## UUID Objects
```python
class UUID(ModelFieldFactory, uuid.UUID)
```
UUID field factory that constructs Field classes and populates their values.
<a name="fields.model_fields.UUID.get_column_type"></a>
#### get\_column\_type
```python
| @classmethod
| get_column_type(cls, **kwargs: Any) -> Any
```
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
**Arguments**:
- `kwargs (Any)`: key, value pairs of sqlalchemy options
**Returns**:
`(sqlalchemy Column)`: initialized column with proper options

docs/api/index.md

@ -0,0 +1,118 @@
Contains documentation of the `ormar` internal API.
Note that this is a technical part of the documentation intended for `ormar` contributors.
!!!note
For completeness, as of now even the internal and special methods are documented and exposed in the API docs.
!!!warning
The current API docs version is a beta: not all methods are documented,
and some redundant items are included since it was partially auto-generated.
!!!danger
Ormar is still under development, and the **internals can change at any moment**.
You shouldn't rely even on the "public" methods if they are not documented in the
normal part of the docs.
## High level overview
Ormar is divided into packages for maintainability and ease of development.
Below you can find a short description of the structure of the whole project and
individual packages.
### Models
Contains the actual `ormar.Model` class (a minimal declaration sketch follows this list), which is based on:
* `ormar.NewBaseModel` which in turn:
* inherits from `pydantic.BaseModel`,
* uses `ormar.ModelMetaclass` for all heavy lifting, relations declaration,
parsing `ormar` fields, creating `sqlalchemy` columns and tables etc.
* There are a lot of tasks during class creation, so `ormar` uses a number of
`helpers` methods separated by functionality: `pydantic`, `sqlalchemy`,
`relations` & `models`, located in the `helpers` submodule.
* inherits from `ormar.ModelTableProxy` that combines `Mixins` providing a special
additional behavior for `ormar.Models`
* `AliasMixin` - handling of column aliases, which are names changed only in db
* `ExcludableMixin` - handling excluding and including fields in dict() and database calls
* `MergeModelMixin` - handling merging of Models initialized from raw sql rows that need to be merged,
for example parent models in a join query that are duplicated in the raw response.
* `PrefetchQueryMixin` - handling resolving relations and ids of models to extract during issuing
subsequent queries in prefetch_related
* `RelationMixin` - handling resolving relations names, related fields etc.
* `SavePrepareMixin` - handling converting related models to their pk values, translating ormar field
names into aliases etc.
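For orientation, a minimal declaration sketch of what the metaclass processes (the model, fields and sqlite URL are illustrative assumptions):
```python
import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Author(ormar.Model):
    class Meta:
        database = database
        metadata = metadata
        tablename = "authors"  # optional - defaults to class name lowercased + "s"

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


# the metaclass has already built the sqlalchemy table and extracted ormar fields
assert Author.Meta.table is not None
assert "name" in Author.Meta.model_fields
```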
### Fields
Contains `ormar.BaseField` that is a base for all fields.
All basic types are declared in `model_fields`, while relation fields are located in:
* `foreign_key`: `ForeignKey` relation, expanding relations (meaning initializing nested models),
creating dummy models with pk only that skip validation etc.
* `many_to_many`: `ManyToMany` relation that does not have a lot of logic of its own.
Related to fields is a `@property_field` decorator that is located in `decorators.property_field`.
There is also a special UUID field declaration for `sqlalchemy` that is based on `CHAR` field type.
### Query Set
Package that handles almost all interactions with db (some small parts are in `ormar.Model` and in `ormar.QuerysetProxy`).
Provides a `QuerySet` that is exposed on each Model as `objects` property.
It has a vast number of methods to query, filter, create, update and delete database rows (see the usage sketch after this list).
* Actual construction of the queries is delegated to `Query` class
* which in turn uses `SqlJoin` to construct joins
* `Clause` to convert `filter` and `exclude` conditions into sql
* `FilterQuery` to apply filter clauses on query
* `OrderQuery` to apply order by clauses on query
* `LimitQuery` to apply limit clause on query
* `OffsetQuery` to apply offset clause on query
* For prefetch_related the same is done by `PrefetchQuery`
* Common helper functions are extracted into `utils`
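A usage sketch of the `objects` QuerySet (assuming the `Author` model from the sketch above and a connected database):
```python
# create a row, then query it back with filters, ordering and limits
author = await Author.objects.create(name="J. R. R. Tolkien")
tolkien = await Author.objects.filter(name__icontains="tolkien").get()
first_two = await Author.objects.order_by("name").limit(2).all()
```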
### Relations
Handles registering relations, adding to/removing from relations, as well as returning the
actual related models instead of the relation fields declared on Models.
* Each `ormar.Model` has its own `RelationManager` registered under `_orm` property.
* `RelationManager` handles `Relations` between two different models
* In case of reverse relations or m2m relations the `RelationProxy` is used which
is basically a list with some special methods that keeps a reference to a list of related models
* Also, for reverse relations and m2m relations `QuerySetProxy` is exposed, that is
used to query the already pre-filtered related models and handles Through models
instances for m2m relations, while delegating actual queries to `QuerySet`
* `AliasManager` handles registration of aliases for relations that are used in queries.
In order to be able to link multiple times to the same table in one query, each link
has to have a unique alias to properly identify columns and extract proper values.
It is a kind of global registry; aliases are randomly generated, so they might differ on each run.
* Common helper functions are extracted into `utils`
### Signals
Handles sending signals on particular events.
* A `SignalEmitter` is registered on each `ormar.Model`; it allows registering any number of
receiver functions that will be notified on each event.
* For now only the combinations of (pre, post) x (save, update, delete) events are pre-populated for the user,
although it's easy to register custom user `Signal`s.
* A set of decorators is prepared, each corresponding to one of the builtin signals,
that can be used to mark functions/methods that should become receivers; those decorators
are located in `decorators.signals` (see the receiver sketch after this list).
* You can register the same function for different `ormar.Models`, but each Model has its own
Emitter that is independent and triggered on events for the given Model.
* Currently, there is no way to register global `Signal` triggered for all models.
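A receiver sketch using one of the builtin signal decorators (the `Author` model and the side effect are illustrative assumptions; the decorator names follow the pre/post x save/update/delete pattern described above):
```python
from ormar.decorators.signals import pre_save

# assuming the Author model from the earlier sketch


@pre_save(Author)
async def fill_missing_name(sender, instance, **kwargs):
    # called before each Author.save(); receivers get the sender model and the instance
    if not instance.name:
        instance.name = "Unknown"
```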
### Exceptions
Gathers all exceptions specific to `ormar`.
All `ormar` exceptions inherit from `AsyncOrmException`.


@ -0,0 +1,64 @@
<a name="models.helpers.models"></a>
# models.helpers.models
<a name="models.helpers.models.populate_default_options_values"></a>
#### populate\_default\_options\_values
```python
populate_default_options_values(new_model: Type["Model"], model_fields: Dict) -> None
```
Sets all optional Meta values to their defaults
and sets model_fields that were already previously extracted.
Here should live all options that are not overwritten/set for all models.
Current options are:
* constraints = []
* abstract = False
**Arguments**:
- `new_model (Model class)`: newly constructed Model
- `model_fields (Union[Dict[str, type], Dict])`:
<a name="models.helpers.models.extract_annotations_and_default_vals"></a>
#### extract\_annotations\_and\_default\_vals
```python
extract_annotations_and_default_vals(attrs: Dict) -> Tuple[Dict, Dict]
```
Extracts annotations from class namespace dict and triggers
extraction of ormar model_fields.
**Arguments**:
- `attrs (Dict)`: namespace of the class created
**Returns**:
`(Tuple[Dict, Dict])`: namespace of the class updated, dict of extracted model_fields
<a name="models.helpers.models.validate_related_names_in_relations"></a>
#### validate\_related\_names\_in\_relations
```python
validate_related_names_in_relations(model_fields: Dict, new_model: Type["Model"]) -> None
```
Performs a validation of relation_names in relation fields.
If multiple fields lead to the same related model,
only one can have an empty related_name param
(populated by default as model.name.lower()+'s').
Also related_names have to be unique for a given related model.
**Raises**:
- `ModelDefinitionError`: if validation of related_names fails
**Arguments**:
- `model_fields (Dict[str, ormar.Field])`: dictionary of declared ormar model fields
- `new_model (Model class)`:


@ -0,0 +1,122 @@
<a name="models.helpers.pydantic"></a>
# models.helpers.pydantic
<a name="models.helpers.pydantic.create_pydantic_field"></a>
#### create\_pydantic\_field
```python
create_pydantic_field(field_name: str, model: Type["Model"], model_field: Type[ManyToManyField]) -> None
```
Registers a pydantic field on the through model that leads to the passed model
and is registered under the passed field_name.
The through model is fetched from the `through` attribute of the passed model_field.
**Arguments**:
- `field_name (str)`: field name to register
- `model (Model class)`: type of field to register
- `model_field (ManyToManyField class)`: relation field from which through model is extracted
<a name="models.helpers.pydantic.get_pydantic_field"></a>
#### get\_pydantic\_field
```python
get_pydantic_field(field_name: str, model: Type["Model"]) -> "ModelField"
```
Extracts the field type and whether it's required from the Model's model_fields by the passed
field_name. Returns a pydantic field with the type of the field_name field.
**Arguments**:
- `field_name (str)`: field name to fetch from Model and name of pydantic field
- `model (Model class)`: type of field to register
**Returns**:
`(pydantic.ModelField)`: newly created pydantic field
<a name="models.helpers.pydantic.populate_default_pydantic_field_value"></a>
#### populate\_default\_pydantic\_field\_value
```python
populate_default_pydantic_field_value(ormar_field: Type[BaseField], field_name: str, attrs: dict) -> dict
```
Grabs the current value of the ormar Field in the class namespace
(so the default_value declared on the ormar model if set)
and converts it to a pydantic.FieldInfo
that pydantic is able to extract later.
On FieldInfo all the needed params are saved, like max_length of the string
and other constraints that pydantic can use to build
its own field validation used by ormar.
**Arguments**:
- `ormar_field (ormar Field)`: field to convert
- `field_name (str)`: field to convert name
- `attrs (Dict)`: current class namespace
**Returns**:
`(Dict)`: updated namespace dict
<a name="models.helpers.pydantic.populate_pydantic_default_values"></a>
#### populate\_pydantic\_default\_values
```python
populate_pydantic_default_values(attrs: Dict) -> Tuple[Dict, Dict]
```
Extracts ormar fields from annotations (deprecated) and from the namespace
dictionary of the class. Fields declared on the model are all subclasses of the
BaseField class.
Triggers conversion of ormar fields into pydantic FieldInfo, which has all the needed
parameters saved.
Overwrites the annotations of ormar fields to the corresponding types declared on
ormar fields (constructed dynamically for relations).
Those annotations are later used by pydantic to construct its own fields.
**Arguments**:
- `attrs (Dict)`: current class namespace
**Returns**:
`(Tuple[Dict, Dict])`: namespace of the class updated, dict of extracted model_fields
<a name="models.helpers.pydantic.get_pydantic_base_orm_config"></a>
#### get\_pydantic\_base\_orm\_config
```python
get_pydantic_base_orm_config() -> Type[BaseConfig]
```
Returns empty pydantic Config with orm_mode set to True.
**Returns**:
`(pydantic Config)`: empty default config with orm_mode set.
<a name="models.helpers.pydantic.get_potential_fields"></a>
#### get\_potential\_fields
```python
get_potential_fields(attrs: Dict) -> Dict
```
Gets all the fields in current class namespace that are Fields.
**Arguments**:
- `attrs (Dict)`: current class namespace
**Returns**:
`(Dict)`: extracted fields that are ormar Fields


@ -0,0 +1,151 @@
<a name="models.helpers.relations"></a>
# models.helpers.relations
<a name="models.helpers.relations.register_relation_on_build"></a>
#### register\_relation\_on\_build
```python
register_relation_on_build(new_model: Type["Model"], field_name: str) -> None
```
Registers a ForeignKey relation in alias_manager to set a table_prefix.
Registration also includes the reverse relation side to be able to join both sides.
The relation is registered by model name and relation field name to allow for multiple
relations between two Models that need to have different
aliases for proper sql joins.
**Arguments**:
- `new_model (Model class)`: constructed model
- `field_name (str)`: name of the related field
<a name="models.helpers.relations.register_many_to_many_relation_on_build"></a>
#### register\_many\_to\_many\_relation\_on\_build
```python
register_many_to_many_relation_on_build(new_model: Type["Model"], field: Type[ManyToManyField], field_name: str) -> None
```
Registers the connection between the through model and both sides of the m2m relation.
Registration also includes the reverse relation side to be able to join both sides.
The relation is registered by model name and relation field name to allow for multiple
relations between two Models that need to have different
aliases for proper sql joins.
By default the relation name is model.name.lower().
**Arguments**:
- `field_name (str)`: name of the relation key
- `new_model (Model class)`: model on which m2m field is declared
- `field (ManyToManyField class)`: relation field
<a name="models.helpers.relations.expand_reverse_relationships"></a>
#### expand\_reverse\_relationships
```python
expand_reverse_relationships(model: Type["Model"]) -> None
```
Iterates through model_fields of the given model and verifies if all reverse
relations have been populated on related models.
If the reverse relation has not been set before, it's set here.
**Arguments**:
- `model (Model class)`: model on which relation should be checked and registered
<a name="models.helpers.relations.register_reverse_model_fields"></a>
#### register\_reverse\_model\_fields
```python
register_reverse_model_fields(model: Type["Model"], child: Type["Model"], related_name: str, model_field: Type["ForeignKeyField"]) -> None
```
Registers the reverse ForeignKey field on the related model.
By default its name is the name.lower()+'s' of the model on which the relation is defined,
but if a related_name is provided it's registered with that name.
Autogenerated reverse fields also set related_name to the original field name.
**Arguments**:
- `model (Model class)`: related model on which reverse field should be defined
- `child (Model class)`: parent model with relation definition
- `related_name (str)`: name by which reverse key should be registered
- `model_field (relation Field)`: original relation ForeignKey field
<a name="models.helpers.relations.register_relation_in_alias_manager"></a>
#### register\_relation\_in\_alias\_manager
```python
register_relation_in_alias_manager(new_model: Type["Model"], field: Type[ForeignKeyField], field_name: str) -> None
```
Registers the relation (and reverse relation) in alias manager.
The m2m relations require registration of through model between
actual end models of the relation.
Delegates the actual registration to:
m2m - register_many_to_many_relation_on_build
fk - register_relation_on_build
**Arguments**:
- `new_model (Model class)`: model on which relation field is declared
- `field (ForeignKey or ManyToManyField class)`: relation field
- `field_name (str)`: name of the relation key
<a name="models.helpers.relations.verify_related_name_dont_duplicate"></a>
#### verify\_related\_name\_dont\_duplicate
```python
verify_related_name_dont_duplicate(child: Type["Model"], parent_model: Type["Model"], related_name: str) -> None
```
Verifies whether the used related_name (regardless of whether it was user defined or
auto generated) is already used on the related model, but is connected with a model
other than the one that we are connecting right now.
**Raises**:
- `ModelDefinitionError`: if the name is already used but leads to a different related
model
**Arguments**:
- `child (ormar.models.metaclass.ModelMetaclass)`: related Model class
- `parent_model (ormar.models.metaclass.ModelMetaclass)`: parent Model class
- `related_name ()`:
**Returns**:
`(None)`: None
<a name="models.helpers.relations.reverse_field_not_already_registered"></a>
#### reverse\_field\_not\_already\_registered
```python
reverse_field_not_already_registered(child: Type["Model"], child_model_name: str, parent_model: Type["Model"]) -> bool
```
Checks if the child is already registered in the parent's pydantic fields.
**Raises**:
- `ModelDefinitionError`: if the related name is already used but leads to a different
related model
**Arguments**:
- `child (ormar.models.metaclass.ModelMetaclass)`: related Model class
- `child_model_name (str)`: related_name of the child if provided
- `parent_model (ormar.models.metaclass.ModelMetaclass)`: parent Model class
**Returns**:
`(bool)`: result of the check


@ -0,0 +1,145 @@
<a name="models.helpers.sqlalchemy"></a>
# models.helpers.sqlalchemy
<a name="models.helpers.sqlalchemy.adjust_through_many_to_many_model"></a>
#### adjust\_through\_many\_to\_many\_model
```python
adjust_through_many_to_many_model(model: Type["Model"], child: Type["Model"], model_field: Type[ManyToManyField]) -> None
```
Registers m2m relation on through model.
Sets ormar.ForeignKey from through model to both child and parent models.
Sets sqlalchemy.ForeignKey to both child and parent models.
Sets pydantic fields with child and parent model types.
**Arguments**:
- `model (Model class)`: model on which relation is declared
- `child (Model class)`: model to which m2m relation leads
- `model_field (ManyToManyField)`: relation field defined in parent model
<a name="models.helpers.sqlalchemy.create_and_append_m2m_fk"></a>
#### create\_and\_append\_m2m\_fk
```python
create_and_append_m2m_fk(model: Type["Model"], model_field: Type[ManyToManyField]) -> None
```
Registers a sqlalchemy Column with a sqlalchemy.ForeignKey leading to the model.
The newly created field is added to the m2m relation through model's Meta columns and table.
**Arguments**:
- `model (Model class)`: Model class to which FK should be created
- `model_field (ManyToManyField field)`: field with ManyToMany relation
<a name="models.helpers.sqlalchemy.check_pk_column_validity"></a>
#### check\_pk\_column\_validity
```python
check_pk_column_validity(field_name: str, field: BaseField, pkname: Optional[str]) -> Optional[str]
```
Receives the field marked as primary key and verifies that the pkname
was not already set (only one allowed per model) and that the field is not marked
as pydantic_only, as it needs to be a database field.
**Raises**:
- `ModelDefinitionError`: if pkname is already set or the field is pydantic_only
**Arguments**:
- `field_name (str)`: name of field
- `field (BaseField)`: ormar.Field
- `pkname (Optional[str])`: already set pkname
**Returns**:
`(str)`: name of the field that should be set as pkname
<a name="models.helpers.sqlalchemy.sqlalchemy_columns_from_model_fields"></a>
#### sqlalchemy\_columns\_from\_model\_fields
```python
sqlalchemy_columns_from_model_fields(model_fields: Dict, new_model: Type["Model"]) -> Tuple[Optional[str], List[sqlalchemy.Column]]
```
Iterates over the model fields declared on the Model and extracts fields that
should be treated as database fields.
If the model is empty it sets a mandatory id field as the primary key
(used in through models in m2m relations).
Triggers a validation of relation_names in relation fields. If multiple fields
lead to the same related model, only one can have an empty related_name param.
Also related_names have to be unique.
Triggers validation of primary_key - only one and required pk can be set,
and it cannot be pydantic_only.
Appends fields to columns if a field is not pydantic_only,
a virtual ForeignKey or a ManyToMany field.
**Raises**:
- `ModelDefinitionError`: if validation of related_names fails,
or pkname validation fails.
**Arguments**:
- `model_fields (Dict[str, ormar.Field])`: dictionary of declared ormar model fields
- `new_model (Model class)`:
**Returns**:
`(Tuple[Optional[str], List[sqlalchemy.Column]])`: pkname, list of sqlalchemy columns
<a name="models.helpers.sqlalchemy.populate_meta_tablename_columns_and_pk"></a>
#### populate\_meta\_tablename\_columns\_and\_pk
```python
populate_meta_tablename_columns_and_pk(name: str, new_model: Type["Model"]) -> Type["Model"]
```
Sets the Model tablename if it's not already set in Meta.
The default tablename, if not present, is the class name lowercased + s (i.e. Bed becomes beds).
Checks if the Model's Meta have pkname and columns set.
If not, calls sqlalchemy_columns_from_model_fields to populate
columns from ormar.fields definitions.
**Raises**:
- `ModelDefinitionError`: if pkname is not present. Each model has to have a pk.
**Arguments**:
- `name (str)`: name of the current Model
- `new_model (ormar.models.metaclass.ModelMetaclass)`: currently constructed Model
**Returns**:
`(ormar.models.metaclass.ModelMetaclass)`: Model with populated pkname and columns in Meta
<a name="models.helpers.sqlalchemy.populate_meta_sqlalchemy_table_if_required"></a>
#### populate\_meta\_sqlalchemy\_table\_if\_required
```python
populate_meta_sqlalchemy_table_if_required(meta: "ModelMeta") -> None
```
Constructs sqlalchemy table out of columns and parameters set on Meta class.
It populates name, metadata, columns and constraints.
**Arguments**:
- `meta (Model class Meta)`: Meta class of the Model without sqlalchemy table constructed
**Returns**:
`(Model class)`: class with populated Meta.table


@ -0,0 +1,90 @@
<a name="models.mixins.alias_mixin"></a>
# models.mixins.alias\_mixin
<a name="models.mixins.alias_mixin.AliasMixin"></a>
## AliasMixin Objects
```python
class AliasMixin()
```
Used to translate field names into database column names.
<a name="models.mixins.alias_mixin.AliasMixin.get_column_alias"></a>
#### get\_column\_alias
```python
| @classmethod
| get_column_alias(cls, field_name: str) -> str
```
Returns the db alias (column name in db) for the given ormar field.
For fields without an alias the field name is returned.
**Arguments**:
- `field_name (str)`: name of the field to get alias from
**Returns**:
`(str)`: alias (db name) if set, otherwise passed name
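A sketch of a field with a db alias (the model and the `album_title` alias are illustrative assumptions):
```python
import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Album(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    # python/pydantic name is "title", the db column is "album_title"
    title: str = ormar.String(max_length=100, name="album_title")


assert Album.get_column_alias("title") == "album_title"
assert Album.get_column_alias("id") == "id"  # no alias -> field name returned
```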
<a name="models.mixins.alias_mixin.AliasMixin.get_column_name_from_alias"></a>
#### get\_column\_name\_from\_alias
```python
| @classmethod
| get_column_name_from_alias(cls, alias: str) -> str
```
Returns the ormar field name for the given db alias (column name in db).
If no field has the given alias, the alias is returned as is.
**Arguments**:
- `alias (str)`:
**Returns**:
`(str)`: field name if set, otherwise passed alias (db name)
<a name="models.mixins.alias_mixin.AliasMixin.translate_columns_to_aliases"></a>
#### translate\_columns\_to\_aliases
```python
| @classmethod
| translate_columns_to_aliases(cls, new_kwargs: Dict) -> Dict
```
Translates dictionary of model fields changing field names into aliases.
If field has no alias the field name remains intact.
Only fields present in the dictionary are translated.
**Arguments**:
- `new_kwargs (Dict)`: dict with fields names and their values
**Returns**:
`(Dict)`: dict with aliases and their values
<a name="models.mixins.alias_mixin.AliasMixin.translate_aliases_to_columns"></a>
#### translate\_aliases\_to\_columns
```python
| @classmethod
| translate_aliases_to_columns(cls, new_kwargs: Dict) -> Dict
```
Translates dictionary of model fields changing aliases into field names.
If field has no alias the alias is already a field name.
Only fields present in the dictionary are translated.
**Arguments**:
- `new_kwargs (Dict)`: dict with aliases and their values
**Returns**:
`(Dict)`: dict with fields names and their values


@ -0,0 +1,206 @@
<a name="models.mixins.excludable_mixin"></a>
# models.mixins.excludable\_mixin
<a name="models.mixins.excludable_mixin.ExcludableMixin"></a>
## ExcludableMixin Objects
```python
class ExcludableMixin(RelationMixin)
```
Used to include/exclude given set of fields on models during load and dict() calls.
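A usage sketch of including/excluding fields (assuming `Track`/`Album` models like in the earlier sketches and a connected database):
```python
# load only the chosen columns from the db ...
track = await Track.objects.fields({"id", "title"}).get(id=1)
# ... and trim the serialized output as well
data = track.dict(exclude={"album"})
```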
<a name="models.mixins.excludable_mixin.ExcludableMixin.get_child"></a>
#### get\_child
```python
| @staticmethod
| get_child(items: Union[Set, Dict, None], key: str = None) -> Union[Set, Dict, None]
```
Used to get nested dictionary keys if they exist, otherwise returns the
passed items.
**Arguments**:
- `items (Union[Set, Dict, None])`: bag of items to include or exclude
- `key (str)`: name of the child to extract
**Returns**:
`(Union[Set, Dict, None])`: child extracted from items if exists
<a name="models.mixins.excludable_mixin.ExcludableMixin.get_excluded"></a>
#### get\_excluded
```python
| @staticmethod
| get_excluded(exclude: Union[Set, Dict, None], key: str = None) -> Union[Set, Dict, None]
```
Proxy to ExcludableMixin.get_child for exclusions.
**Arguments**:
- `exclude (Union[Set, Dict, None])`: bag of items to exclude
- `key (str)`: name of the child to extract
**Returns**:
`(Union[Set, Dict, None])`: child extracted from items if exists
<a name="models.mixins.excludable_mixin.ExcludableMixin.get_included"></a>
#### get\_included
```python
| @staticmethod
| get_included(include: Union[Set, Dict, None], key: str = None) -> Union[Set, Dict, None]
```
Proxy to ExcludableMixin.get_child for inclusions.
**Arguments**:
- `include (Union[Set, Dict, None])`: bag of items to include
- `key (str)`: name of the child to extract
**Returns**:
`(Union[Set, Dict, None])`: child extracted from items if exists
<a name="models.mixins.excludable_mixin.ExcludableMixin.is_excluded"></a>
#### is\_excluded
```python
| @staticmethod
| is_excluded(exclude: Union[Set, Dict, None], key: str = None) -> bool
```
Checks if the given key should be excluded on a model/dict.
**Arguments**:
- `exclude (Union[Set, Dict, None])`: bag of items to exclude
- `key (str)`: name of the child to extract
**Returns**:
`(bool)`: result of the check
<a name="models.mixins.excludable_mixin.ExcludableMixin.is_included"></a>
#### is\_included
```python
| @staticmethod
| is_included(include: Union[Set, Dict, None], key: str = None) -> bool
```
Checks if the given key should be included on a model/dict.
**Arguments**:
- `include (Union[Set, Dict, None])`: bag of items to include
- `key (str)`: name of the child to extract
**Returns**:
`(bool)`: result of the check
<a name="models.mixins.excludable_mixin.ExcludableMixin._populate_pk_column"></a>
#### \_populate\_pk\_column
```python
| @staticmethod
| _populate_pk_column(model: Type["Model"], columns: List[str], use_alias: bool = False) -> List[str]
```
Adds primary key column/alias (depends on use_alias flag) to list of
column names that are selected.
**Arguments**:
- `model (Type["Model"])`: model on columns are selected
- `columns (List[str])`: list of columns names
- `use_alias (bool)`: flag to set if aliases or field names should be used
**Returns**:
`(List[str])`: list of columns names with pk column in it
<a name="models.mixins.excludable_mixin.ExcludableMixin.own_table_columns"></a>
#### own\_table\_columns
```python
| @classmethod
| own_table_columns(cls, model: Type["Model"], fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]], use_alias: bool = False) -> List[str]
```
Returns a list of aliases or field names for the given model.
The aliases/names switch is the use_alias flag.
If fields are provided, only fields included in fields will be returned.
If exclude_fields is provided, those fields will be excluded from the result.
The primary key field is always added and cannot be excluded (it will be added anyway).
**Arguments**:
- `model (Type["Model"])`: model on columns are selected
- `fields (Optional[Union[Set, Dict]])`: set/dict of fields to include
- `exclude_fields (Optional[Union[Set, Dict]])`: set/dict of fields to exclude
- `use_alias (bool)`: flag if aliases or field names should be used
**Returns**:
`(List[str])`: list of column field names or aliases
<a name="models.mixins.excludable_mixin.ExcludableMixin._update_excluded_with_related_not_required"></a>
#### \_update\_excluded\_with\_related\_not\_required
```python
| @classmethod
| _update_excluded_with_related_not_required(cls, exclude: Union["AbstractSetIntStr", "MappingIntStrAny", None], nested: bool = False) -> Union[Set, Dict]
```
Used during generation of the dict().
To avoid cyclical references and hitting the maximum recursion limit, nested models
have to exclude related models that are not mandatory.
For a main model (not nested) only nullable related field names are added to the
exclusion; for nested models all related models are excluded.
**Arguments**:
- `exclude (Union[Set, Dict, None])`: set/dict with fields to exclude
- `nested (bool)`: flag setting nested models (child of previous one, not main one)
**Returns**:
`(Union[Set, Dict])`: set or dict with excluded fields added.
<a name="models.mixins.excludable_mixin.ExcludableMixin.get_names_to_exclude"></a>
#### get\_names\_to\_exclude
```python
| @classmethod
| get_names_to_exclude(cls, fields: Optional[Union[Dict, Set]] = None, exclude_fields: Optional[Union[Dict, Set]] = None) -> Set
```
Returns a set of model field names that should be explicitly excluded
during model initialization.
Those fields will be set to None to prevent ormar/pydantic from setting default
values on them. They should be returned as None in any case.
Used when parsing data from database rows that construct Models by initializing
them with dicts constructed from those db rows.
**Arguments**:
- `fields (Optional[Union[Set, Dict]])`: set/dict of fields to include
- `exclude_fields (Optional[Union[Set, Dict]])`: set/dict of fields to exclude
**Returns**:
`(Set)`: set of field names that should be excluded

View File

@ -0,0 +1,60 @@
<a name="models.mixins.merge_mixin"></a>
# models.mixins.merge\_mixin
<a name="models.mixins.merge_mixin.MergeModelMixin"></a>
## MergeModelMixin Objects
```python
class MergeModelMixin()
```
Used to merge model instances returned by the database
that are already initialized as ormar Models.
Models can be duplicated during joins when the parent model has multiple child rows;
in the end all parent (main) models should be unique.
<a name="models.mixins.merge_mixin.MergeModelMixin.merge_instances_list"></a>
#### merge\_instances\_list
```python
| @classmethod
| merge_instances_list(cls, result_rows: Sequence["Model"]) -> Sequence["Model"]
```
Merges a list of models into a list of unique models.
Models can be duplicated during joins when the parent model has multiple child rows;
in the end all parent (main) models should be unique.
**Arguments**:
- `result_rows (List["Model"])`: list of already initialized Models with child models
populated, each instance is one row in db and some models can duplicate
**Returns**:
`(List["Model"])`: list of merged models where each main model is unique
<a name="models.mixins.merge_mixin.MergeModelMixin.merge_two_instances"></a>
#### merge\_two\_instances
```python
| @classmethod
| merge_two_instances(cls, one: "Model", other: "Model") -> "Model"
```
Merges the current Model (other) with the previous one (one) and returns the current
Model instance with data merged from the previous one.
If needed it calls itself recursively and also merges children models.
**Arguments**:
- `one (Model)`: previous model instance
- `other (Model)`: current model instance
**Returns**:
`(Model)`: current Model instance with data merged from previous one.

View File

@ -0,0 +1,100 @@
<a name="models.mixins.prefetch_mixin"></a>
# models.mixins.prefetch\_mixin
<a name="models.mixins.prefetch_mixin.PrefetchQueryMixin"></a>
## PrefetchQueryMixin Objects
```python
class PrefetchQueryMixin(RelationMixin)
```
Used in PrefetchQuery to extract ids and names of models to prefetch.
<a name="models.mixins.prefetch_mixin.PrefetchQueryMixin.get_clause_target_and_filter_column_name"></a>
#### get\_clause\_target\_and\_filter\_column\_name
```python
| @staticmethod
| get_clause_target_and_filter_column_name(parent_model: Type["Model"], target_model: Type["Model"], reverse: bool, related: str) -> Tuple[Type["Model"], str]
```
Returns Model on which query clause should be performed and name of the column.
**Arguments**:
- `parent_model (Type[Model])`: related model that the relation leads to
- `target_model (Type[Model])`: model on which the query should be performed
- `reverse (bool)`: flag if the relation is reverse
- `related (str)`: name of the relation field
**Returns**:
`(Tuple[Type[Model], str])`: Model on which query clause should be performed and name of the column
<a name="models.mixins.prefetch_mixin.PrefetchQueryMixin.get_column_name_for_id_extraction"></a>
#### get\_column\_name\_for\_id\_extraction
```python
| @staticmethod
| get_column_name_for_id_extraction(parent_model: Type["Model"], reverse: bool, related: str, use_raw: bool) -> str
```
Returns the name of the column that should be used to extract ids from the model.
Depending on the relation side it's either the primary key column of the parent model
or the field name specified by the related parameter.
**Arguments**:
- `parent_model (Type[Model])`: model from which id column should be extracted
- `reverse (bool)`: flag if the relation is reverse
- `related (str)`: name of the relation field
- `use_raw (bool)`: flag if aliases or field names should be used
**Returns**:
`(str)`: name of the column to use for id extraction
<a name="models.mixins.prefetch_mixin.PrefetchQueryMixin.get_related_field_name"></a>
#### get\_related\_field\_name
```python
| @classmethod
| get_related_field_name(cls, target_field: Type["BaseField"]) -> str
```
Returns name of the relation field that should be used in prefetch query.
This field is later used to register relation in prefetch query,
populate relations dict, and populate nested model in prefetch query.
**Arguments**:
- `target_field (Type[BaseField])`: relation field that should be used in prefetch
**Returns**:
`(str)`: name of the field
<a name="models.mixins.prefetch_mixin.PrefetchQueryMixin.get_filtered_names_to_extract"></a>
#### get\_filtered\_names\_to\_extract
```python
| @classmethod
| get_filtered_names_to_extract(cls, prefetch_dict: Dict) -> List
```
Returns a list of related field names that should be followed to prefetch related
models from.
The list of models is translated into a dict to ensure each model is extracted only
once in one query; that's why this function accepts prefetch_dict, not a list.
Only relations from the current model are returned.
**Arguments**:
- `prefetch_dict (Dict)`: dictionary of fields to extract
**Returns**:
`(List)`: list of fields names to extract
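These helpers back the queryset's prefetch_related() flow. A hedged usage sketch, reusing the illustrative Company model from the earlier example and assuming an Employee model that is not part of this changeset:
```python
# Illustrative sketch - Employee is an assumption; database/metadata as in the earlier sketch.
from typing import Optional


class Employee(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    company: Optional[Company] = ormar.ForeignKey(Company, related_name="employees")


# Inside an async context: related employees are fetched in a separate query
# by the extracted company ids instead of being joined row by row.
companies = await Company.objects.prefetch_related("employees").all()
```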

View File

@ -0,0 +1,93 @@
<a name="models.mixins.relation_mixin"></a>
# models.mixins.relation\_mixin
<a name="models.mixins.relation_mixin.RelationMixin"></a>
## RelationMixin Objects
```python
class RelationMixin()
```
Used to return relation fields/names etc. from given model
<a name="models.mixins.relation_mixin.RelationMixin.extract_db_own_fields"></a>
#### extract\_db\_own\_fields
```python
| @classmethod
| extract_db_own_fields(cls) -> Set
```
Returns only fields that are stored in the model's own database table, excluding all
related fields.
**Returns**:
`(Set)`: set of model fields with relation fields excluded
<a name="models.mixins.relation_mixin.RelationMixin.extract_related_fields"></a>
#### extract\_related\_fields
```python
| @classmethod
| extract_related_fields(cls) -> List
```
Returns List of ormar Fields for all relations declared on a model.
List is cached in cls._related_fields for quicker access.
**Returns**:
`(List)`: list of related fields
<a name="models.mixins.relation_mixin.RelationMixin.extract_related_names"></a>
#### extract\_related\_names
```python
| @classmethod
| extract_related_names(cls) -> Set
```
Returns a set of field names for all relations declared on a model.
The set is cached in cls._related_names for quicker access.
**Returns**:
`(Set)`: set of related field names
<a name="models.mixins.relation_mixin.RelationMixin._extract_db_related_names"></a>
#### \_extract\_db\_related\_names
```python
| @classmethod
| _extract_db_related_names(cls) -> Set
```
Returns only fields that are stored in the model's own database table, excluding
related fields that are not stored as foreign keys on the given model.
**Returns**:
`(Set)`: set of model fields with non fk relation fields excluded
<a name="models.mixins.relation_mixin.RelationMixin._exclude_related_names_not_required"></a>
#### \_exclude\_related\_names\_not\_required
```python
| @classmethod
| _exclude_related_names_not_required(cls, nested: bool = False) -> Set
```
Returns a set of non-mandatory related models' field names.
For a main model (not nested) only nullable related field names are returned;
for nested models all related model names are returned.
**Arguments**:
- `nested (bool)`: flag setting nested models (child of previous one, not main one)
**Returns**:
`(Set)`: set of non mandatory related fields

View File

@ -0,0 +1,93 @@
<a name="models.mixins.save_mixin"></a>
# models.mixins.save\_mixin
<a name="models.mixins.save_mixin.SavePrepareMixin"></a>
## SavePrepareMixin Objects
```python
class SavePrepareMixin(RelationMixin, AliasMixin)
```
Used to prepare models to be saved in database
<a name="models.mixins.save_mixin.SavePrepareMixin.prepare_model_to_save"></a>
#### prepare\_model\_to\_save
```python
| @classmethod
| prepare_model_to_save(cls, new_kwargs: dict) -> dict
```
Combines all preparation methods before saving.
Removes the primary key if it's a nullable or autoincrement pk field
and it's set to None.
Substitutes related models with their primary key values as the fk column.
Populates default values for fields that have a default set but no value.
Translates columns into aliases (db names).
**Arguments**:
- `new_kwargs (Dict[str, str])`: dictionary of model that is about to be saved
**Returns**:
`(Dict[str, str])`: dictionary of model that is about to be saved
<a name="models.mixins.save_mixin.SavePrepareMixin._remove_pk_from_kwargs"></a>
#### \_remove\_pk\_from\_kwargs
```python
| @classmethod
| _remove_pk_from_kwargs(cls, new_kwargs: dict) -> dict
```
Removes the primary key if it's a nullable or autoincrement pk field
and it's set to None.
**Arguments**:
- `new_kwargs (Dict[str, str])`: dictionary of model that is about to be saved
**Returns**:
`(Dict[str, str])`: dictionary of model that is about to be saved
<a name="models.mixins.save_mixin.SavePrepareMixin.substitute_models_with_pks"></a>
#### substitute\_models\_with\_pks
```python
| @classmethod
| substitute_models_with_pks(cls, model_dict: Dict) -> Dict
```
Receives dictionary of model that is about to be saved and changes all related
models that are stored as foreign keys to their fk value.
**Arguments**:
- `model_dict (Dict)`: dictionary of model that is about to be saved
**Returns**:
`(Dict)`: dictionary of model that is about to be saved
<a name="models.mixins.save_mixin.SavePrepareMixin.populate_default_values"></a>
#### populate\_default\_values
```python
| @classmethod
| populate_default_values(cls, new_kwargs: Dict) -> Dict
```
Receives dictionary of model that is about to be saved and populates the default
value on the fields that have the default value set, but no actual value was
passed by the user.
**Arguments**:
- `new_kwargs (Dict)`: dictionary of model that is about to be saved
**Returns**:
`(Dict)`: dictionary of model that is about to be saved

View File

@ -0,0 +1,326 @@
<a name="models.metaclass"></a>
# models.metaclass
<a name="models.metaclass.check_if_field_has_choices"></a>
#### check\_if\_field\_has\_choices
```python
check_if_field_has_choices(field: Type[BaseField]) -> bool
```
Checks if the given field has choices populated.
If it has, a validator for this field needs to be attached.
**Arguments**:
- `field (BaseField)`: ormar field to check
**Returns**:
`(bool)`: result of the check
<a name="models.metaclass.choices_validator"></a>
#### choices\_validator
```python
choices_validator(cls: Type["Model"], values: Dict[str, Any]) -> Dict[str, Any]
```
Validator that is attached to pydantic model pre root validators.
Validator checks if field value is in field.choices list.
**Raises**:
- `ValueError`: if field value is outside of allowed choices.
**Arguments**:
- `cls (Model class)`: constructed class
- `values (Dict[str, Any])`: dictionary of field values (pydantic side)
**Returns**:
`(Dict[str, Any])`: values if pass validation, otherwise exception is raised
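A hedged sketch of the behaviour this validator provides; the Shirt model is purely illustrative and reuses the database/metadata objects assumed in the earlier sketches:
```python
# Illustrative sketch - the Shirt model is an assumption.
class Shirt(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    size: str = ormar.String(max_length=3, choices=["S", "M", "L"])


Shirt(size="M")  # passes the attached pre root validator
try:
    Shirt(size="XXL")  # value outside of field.choices
except ValueError:  # pydantic.ValidationError subclasses ValueError
    pass
```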
<a name="models.metaclass.populate_choices_validators"></a>
#### populate\_choices\_validators
```python
populate_choices_validators(model: Type["Model"]) -> None
```
Checks if Model has any fields with choices set.
If yes it adds choices validation into pre root validators.
**Arguments**:
- `model (Model class)`: newly constructed Model
<a name="models.metaclass.add_cached_properties"></a>
#### add\_cached\_properties
```python
add_cached_properties(new_model: Type["Model"]) -> None
```
Sets cached properties for both pydantic and ormar models.
Quick access fields are fields grabbed in getattribute to skip all checks.
Related fields and names are populated to None as they can change later.
When children models are constructed they can modify the parent to register themselves.
All properties here are used as "cache" to not recalculate them constantly.
**Arguments**:
- `new_model (Model class)`: newly constructed Model
<a name="models.metaclass.meta_field_not_set"></a>
#### meta\_field\_not\_set
```python
meta_field_not_set(model: Type["Model"], field_name: str) -> bool
```
Checks if a field with the given name is already present in model.Meta,
then checks if it's set to something truthy
(in practice meaning not None, as it's either None or an ormar Field).
**Arguments**:
- `model (Model class)`: newly constructed model
- `field_name (str)`: name of the ormar field
**Returns**:
`(bool)`: result of the check
<a name="models.metaclass.add_property_fields"></a>
#### add\_property\_fields
```python
add_property_fields(new_model: Type["Model"], attrs: Dict) -> None
```
Checks the class namespace for properties or functions with __property_field__.
If an attribute has __property_field__ it was decorated with @property_field.
Functions like this are exposed in dict() (and therefore also in fastapi responses).
Names of property fields are cached for quicker access/extraction.
**Arguments**:
- `new_model (Model class)`: newly constructed model
- `attrs (Dict[str, str])`: class namespace of the model being constructed
<a name="models.metaclass.register_signals"></a>
#### register\_signals
```python
register_signals(new_model: Type["Model"]) -> None
```
Registers signals on the model's SignalEmitter and sets predefined signals.
Predefined signals are (pre/post) + (save/update/delete).
Signals are emitted in both model own methods and in selected queryset ones.
**Arguments**:
- `new_model (Model class)`: newly constructed model
<a name="models.metaclass.update_attrs_and_fields"></a>
#### update\_attrs\_and\_fields
```python
update_attrs_and_fields(attrs: Dict, new_attrs: Dict, model_fields: Dict, new_model_fields: Dict, new_fields: Set) -> Dict
```
Updates __annotations__, values of model fields (so pydantic FieldInfos)
as well as model.Meta.model_fields definitions from parents.
**Arguments**:
- `attrs (Dict)`: new namespace for class being constructed
- `new_attrs (Dict)`: part of the namespace extracted from parent class
- `model_fields (Dict[str, BaseField])`: ormar fields defined in the current class
- `new_model_fields (Dict[str, BaseField])`: ormar fields defined in parent classes
- `new_fields (Set[str])`: set of new fields names
<a name="models.metaclass.verify_constraint_names"></a>
#### verify\_constraint\_names
```python
verify_constraint_names(base_class: "Model", model_fields: Dict, parent_value: List) -> None
```
Verifies that fields redefined (overwritten) in subclasses did not remove
any column name that is used in a constraint, as that would fail during sqlalchemy
Table creation.
**Arguments**:
- `base_class (Model or model parent class)`: one of the parent classes
- `model_fields (Dict[str, BaseField])`: ormar fields defined in the current class
- `parent_value (List)`: list of base class constraints
<a name="models.metaclass.update_attrs_from_base_meta"></a>
#### update\_attrs\_from\_base\_meta
```python
update_attrs_from_base_meta(base_class: "Model", attrs: Dict, model_fields: Dict) -> None
```
Updates Meta parameters in child from parent if needed.
**Arguments**:
- `base_class (Model or model parent class)`: one of the parent classes
- `attrs (Dict)`: new namespace for class being constructed
- `model_fields (Dict[str, BaseField])`: ormar fields defined in the current class
<a name="models.metaclass.copy_and_replace_m2m_through_model"></a>
#### copy\_and\_replace\_m2m\_through\_model
```python
copy_and_replace_m2m_through_model(field: Type[ManyToManyField], field_name: str, table_name: str, parent_fields: Dict, attrs: Dict, meta: ModelMeta) -> None
```
Clones the Through model class for m2m relations, appending the child name to the name
of the cloned class.
Clones non foreign key fields from the parent model; the same is done with the
database columns.
Modifies related_name by appending the child table name after '_'.
For the table name, the child's table name is appended after '_'.
Removes the original sqlalchemy table from the metadata if it was not removed already.
**Arguments**:
- `field (Type[ManyToManyField])`: field with relations definition
- `field_name (str)`: name of the relation field
- `table_name (str)`: name of the table
- `parent_fields (Dict)`: dictionary of fields to copy to new models from parent
- `attrs (Dict)`: new namespace for class being constructed
- `meta (ModelMeta)`: metaclass of currently created model
<a name="models.metaclass.copy_data_from_parent_model"></a>
#### copy\_data\_from\_parent\_model
```python
copy_data_from_parent_model(base_class: Type["Model"], curr_class: type, attrs: Dict, model_fields: Dict[
str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
]) -> Tuple[Dict, Dict]
```
Copies the key parameters [database, metadata, property_fields and constraints]
and fields from parent models, overwriting them if needed.
Only abstract classes can be subclassed,
since relation fields require a different related_name for different children.
**Raises**:
- `ModelDefinitionError`: if non abstract model is subclassed
**Arguments**:
- `base_class (Model or model parent class)`: one of the parent classes
- `curr_class (Model or model parent class)`: current constructed class
- `attrs (Dict)`: new namespace for class being constructed
- `model_fields (Dict[str, BaseField])`: ormar fields defined in the current class
**Returns**:
`(Tuple[Dict, Dict])`: updated attrs and model_fields
<a name="models.metaclass.extract_from_parents_definition"></a>
#### extract\_from\_parents\_definition
```python
extract_from_parents_definition(base_class: type, curr_class: type, attrs: Dict, model_fields: Dict[
str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
]) -> Tuple[Dict, Dict]
```
Extracts fields from base classes if they have valid ormar fields.
If a model was already parsed, field definitions need to be removed from the class
because pydantic complains about field re-definition, so after the first child
we need to extract from __parsed_fields__, not the class itself.
If the class is parsed for the first time, annotations and field definitions are
parsed from the class.__dict__.
If the class is an ormar.Model it is skipped.
**Arguments**:
- `base_class (Model or model parent class)`: one of the parent classes
- `curr_class (Model or model parent class)`: current constructed class
- `attrs (Dict)`: new namespace for class being constructed
- `model_fields (Dict[str, BaseField])`: ormar fields defined in the current class
**Returns**:
`(Tuple[Dict, Dict])`: updated attrs and model_fields
<a name="models.metaclass.ModelMeta"></a>
## ModelMeta Objects
```python
class ModelMeta()
```
Class used for type hinting.
Users can subclass this one for convenience but it's not required.
The only requirement is that an ormar.Model has to have an inner class named Meta.
<a name="models.metaclass.ModelMetaclass"></a>
## ModelMetaclass Objects
```python
class ModelMetaclass(pydantic.main.ModelMetaclass)
```
<a name="models.metaclass.ModelMetaclass.__new__"></a>
#### \_\_new\_\_
```python
| __new__(mcs: "ModelMetaclass", name: str, bases: Any, attrs: dict) -> "ModelMetaclass"
```
Metaclass used by ormar Models that performs the configuration
and build of ormar Models.
Sets the pydantic configuration.
Extracts model_fields, converts them to pydantic FieldInfo
and updates the class namespace.
Extracts settings and fields from parent classes.
Fetches methods decorated with the @property_field decorator
to expose them later in dict().
Constructs the parent pydantic Metaclass/Model.
If the class has a Meta class declared (so actual ormar Models) it also:
* populates sqlalchemy columns, pkname and tables from model_fields
* registers reverse relationships on related models
* registers all relations in the alias manager that populates table_prefixes
* exposes the alias manager on each Model
* creates a QuerySet for each model and exposes it on the class
**Arguments**:
- `name (str)`: name of current class
- `bases (Tuple)`: base classes
- `attrs (Dict)`: class namespace

View File

@ -0,0 +1,14 @@
<a name="models.modelproxy"></a>
# models.modelproxy
<a name="models.modelproxy.ModelTableProxy"></a>
## ModelTableProxy Objects
```python
class ModelTableProxy(
PrefetchQueryMixin, MergeModelMixin, SavePrepareMixin, ExcludableMixin)
```
Used to combine all mixins with different set of functionalities.
One of the bases of the ormar Model class.

310
docs/api/models/model.md Normal file
View File

@ -0,0 +1,310 @@
<a name="models.model"></a>
# models.model
<a name="models.model.group_related_list"></a>
#### group\_related\_list
```python
group_related_list(list_: List) -> Dict
```
Translates the list of related strings into a dictionary.
That way nested models are grouped to traverse them in the right order
and to avoid repetition.
Sample: ["people__houses", "people__cars__models", "people__cars__colors"]
will become:
{'people': {'houses': [], 'cars': ['models', 'colors']}}
**Arguments**:
- `list_ (List[str])`: list of related models used in select related
**Returns**:
`(Dict[str, List])`: list converted to dictionary to avoid repetition and group nested models
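The docstring sample expressed as a small hedged sketch; the import path is an assumption based on the module name above:
```python
# Hedged sketch - assumes the function is importable from ormar.models.model.
from ormar.models.model import group_related_list

related = ["people__houses", "people__cars__models", "people__cars__colors"]
assert group_related_list(related) == {
    "people": {"houses": [], "cars": ["models", "colors"]}
}
```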
<a name="models.model.Model"></a>
## Model Objects
```python
class Model(NewBaseModel)
```
<a name="models.model.Model.from_row"></a>
#### from\_row
```python
| @classmethod
| from_row(cls: Type[T], row: sqlalchemy.engine.ResultProxy, select_related: List = None, related_models: Any = None, previous_model: Type[T] = None, related_name: str = None, fields: Optional[Union[Dict, Set]] = None, exclude_fields: Optional[Union[Dict, Set]] = None) -> Optional[T]
```
Model method to convert a raw sql row from the database into an ormar.Model instance.
Traverses nested models if they were specified in select_related for the query.
Called recursively and returns the model instance if it's present in the row.
Note that it processes one row at a time, so if there are duplicates of
the parent row that need to be joined/combined
(like a parent row in a sql join with 2+ child rows),
instances populated in this method are later combined in the QuerySet.
The other method working directly on raw database results is in prefetch_query,
where rows are populated in a different way as they do not have
nested models in the result.
**Arguments**:
- `row (sqlalchemy.engine.result.ResultProxy)`: raw result row from the database
- `select_related (List)`: list of names of related models fetched from database
- `related_models (Union[List, Dict])`: list or dict of related models
- `previous_model (Model class)`: internal param for nested models to specify table_prefix
- `related_name (str)`: internal parameter - name of current nested model
- `fields (Optional[Union[Dict, Set]])`: fields and related model fields to include
if provided only those are included
- `exclude_fields (Optional[Union[Dict, Set]])`: fields and related model fields to exclude
excludes the fields even if they are provided in fields
**Returns**:
`(Optional[Model])`: returns model if model is populated from database
<a name="models.model.Model.populate_nested_models_from_row"></a>
#### populate\_nested\_models\_from\_row
```python
| @classmethod
| populate_nested_models_from_row(cls, item: dict, row: sqlalchemy.engine.ResultProxy, related_models: Any, fields: Optional[Union[Dict, Set]] = None, exclude_fields: Optional[Union[Dict, Set]] = None) -> dict
```
Traverses structure of related models and populates the nested models
from the database row.
Related models can be a list if only directly related models are to be
populated, converted to dict if related models also have their own related
models to be populated.
Recursively calls the from_row method on nested instances and creates nested
instances. In the end those instances are added to the final model dictionary.
**Arguments**:
- `item (Dict)`: dictionary of already populated nested models, otherwise empty dict
- `row (sqlalchemy.engine.result.ResultProxy)`: raw result row from the database
- `related_models (Union[Dict, List])`: list or dict of related models
- `fields (Optional[Union[Dict, Set]])`: fields and related model fields to include -
if provided only those are included
- `exclude_fields (Optional[Union[Dict, Set]])`: fields and related model fields to exclude
excludes the fields even if they are provided in fields
**Returns**:
`(Dict)`: dictionary with keys corresponding to model fields names
and values are database values
<a name="models.model.Model.extract_prefixed_table_columns"></a>
#### extract\_prefixed\_table\_columns
```python
| @classmethod
| extract_prefixed_table_columns(cls, item: dict, row: sqlalchemy.engine.result.ResultProxy, table_prefix: str, fields: Optional[Union[Dict, Set]] = None, exclude_fields: Optional[Union[Dict, Set]] = None) -> dict
```
Extracts own fields from raw sql result, using a given prefix.
Prefix changes depending on the table's position in a join.
If the table is a main table, there is no prefix.
All joined tables have prefixes to allow duplicate column names,
as well as duplicated joins to the same table from multiple different tables.
Extracted fields populate the item dict later used to construct a Model.
Used in Model.from_row and PrefetchQuery._populate_rows methods.
**Arguments**:
- `item (Dict)`: dictionary of already populated nested models, otherwise empty dict
- `row (sqlalchemy.engine.result.ResultProxy)`: raw result row from the database
- `table_prefix (str)`: prefix of the table from AliasManager
each pair of tables have own prefix (two of them depending on direction) -
used in joins to allow multiple joins to the same table.
- `fields (Optional[Union[Dict, Set]])`: fields and related model fields to include -
if provided only those are included
- `exclude_fields (Optional[Union[Dict, Set]])`: fields and related model fields to exclude
excludes the fields even if they are provided in fields
**Returns**:
`(Dict)`: dictionary with keys corresponding to model fields names
and values are database values
<a name="models.model.Model.upsert"></a>
#### upsert
```python
| async upsert(**kwargs: Any) -> T
```
Performs either a save or an update depending on the presence of the pk.
If the pk field is filled it's an update, otherwise a save is performed.
For save, kwargs are ignored; they are used only in update if provided.
**Arguments**:
- `kwargs (Any)`: list of fields to update
**Returns**:
`(Model)`: saved Model
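A hedged usage sketch, inside an async context, reusing the illustrative Company model from the earlier example:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company = Company(name="ACME")
await company.upsert()                  # no pk set yet -> performs save()
await company.upsert(name="ACME Inc.")  # pk present -> performs update(name=...)
```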
<a name="models.model.Model.save"></a>
#### save
```python
| async save() -> T
```
Performs a save of the given Model instance.
If the primary key is already saved, the db backend will throw an integrity error.
Related models are saved by pk number; reverse relation and many to many fields
are not saved - use the corresponding relation methods.
If there are fields with server_default set and those fields
are not already filled, save will also trigger a second query
to refresh the fields populated server side.
Does not recognize whether the model was previously saved.
If you want to perform an update or insert depending on the presence of the pk
field, use upsert.
Sends pre_save and post_save signals.
Sets model save status to True.
**Returns**:
`(Model)`: saved Model
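A hedged usage sketch, inside an async context, with the illustrative Company model:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company = Company(name="Initech")
await company.save()
assert company.saved            # save status is set to True
assert company.pk is not None   # autoincrement pk populated by the database
```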
<a name="models.model.Model.save_related"></a>
#### save\_related
```python
| async save_related(follow: bool = False, visited: Set = None, update_count: int = 0) -> int
```
Triggers an upsert method on all related models
if the instances are not already saved.
By default saves only the directly related ones.
If follow=True is set it also saves related models of related models.
To avoid getting stuck in an infinite loop, as related models also keep a relation
to the parent model, a set of visited models is kept.
That way already visited models that are nested are saved, but the save does not
follow them further. So Model A -> Model B -> Model A -> Model C will save the second
Model A but will never follow into Model C.
Nested relations of this kind need to be persisted manually.
**Arguments**:
- `follow (bool)`: flag to trigger deep save -
by default only directly related models are saved
with follow=True also related models of related models are saved
- `visited (Set)`: internal parameter for recursive calls - already visited models
- `update_count (int)`: internal parameter for recursive calls -
number of updated instances
**Returns**:
`(int)`: number of updated/saved models
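A hedged usage sketch with the illustrative Company/Employee models from the earlier examples; the returned count depends on how many related instances were unsaved:
```python
# Illustrative sketch - Company/Employee are the assumed models from earlier examples.
company = await Company.objects.create(name="Globex")
employee = await Employee.objects.create(name="Hank", company=company)
company.name = "Globex Corp."            # modifying a field marks company as unsaved
updated = await employee.save_related()  # upserts the directly related, unsaved company
```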
<a name="models.model.Model._update_and_follow"></a>
#### \_update\_and\_follow
```python
| @staticmethod
| async _update_and_follow(rel: T, follow: bool, visited: Set, update_count: int) -> Tuple[int, Set]
```
Internal method used in save_related to follow related models and update numbers
of updated related instances.
**Arguments**:
- `rel (Model)`: Model to follow
- `follow (bool)`: flag to trigger deep save -
by default only directly related models are saved
with follow=True also related models of related models are saved
- `visited (Set)`: internal parameter for recursive calls - already visited models
- `update_count (int)`: internal parameter for recursive calls -
number of updated instances
**Returns**:
`(Tuple[int, Set])`: tuple of update count and visited
<a name="models.model.Model.update"></a>
#### update
```python
| async update(**kwargs: Any) -> T
```
Performs an update of the Model instance in the database.
Fields can be updated beforehand or passed as kwargs.
Sends pre_update and post_update signals.
Sets model save status to True.
**Raises**:
- `ModelPersistenceError`: If the pk column is not set
**Arguments**:
- `kwargs (Any)`: list of fields to update as field=value pairs
**Returns**:
`(Model)`: updated Model
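A hedged usage sketch, inside an async context, with the illustrative Company model:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company = await Company.objects.create(name="Initrode")
await company.update(founded=1999)   # pass the fields as kwargs...
company.name = "Initrode LLC"
await company.update()               # ...or set them beforehand and just call update()
```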
<a name="models.model.Model.delete"></a>
#### delete
```python
| async delete() -> int
```
Removes the Model instance from the database.
Sends pre_delete and post_delete signals.
Sets model save status to False.
Note that it does not delete the Model itself (the python object).
So you can delete and later save (since the pk is removed no conflict will arise)
or update it and the Model will be saved in the database again.
**Returns**:
`(int)`: number of deleted rows (for some backends)
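A hedged usage sketch with the illustrative Company model; the python object survives the row deletion:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company = await Company.objects.create(name="Soon Gone")
await company.delete()
assert not company.saved   # save status flipped to False
await company.save()       # pk was removed, so this inserts a fresh row
```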
<a name="models.model.Model.load"></a>
#### load
```python
| async load() -> T
```
Allows refreshing existing Model fields from the database.
Be careful as related models can be overwritten by pk_only models in load.
Does NOT refresh the related models' fields if they were loaded before.
**Raises**:
- `NoMatch`: If given pk is not found in database.
**Returns**:
`(Model)`: reloaded Model
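A hedged usage sketch with the illustrative Company model, refreshing an instance after a queryset update changed the row behind its back:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company = await Company.objects.create(name="Reloadable")
await Company.objects.filter(id=company.id).update(name="Reloaded")
await company.load()
assert company.name == "Reloaded"
```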

View File

@ -0,0 +1,509 @@
<a name="models.newbasemodel"></a>
# models.newbasemodel
<a name="models.newbasemodel.NewBaseModel"></a>
## NewBaseModel Objects
```python
class NewBaseModel(pydantic.BaseModel, ModelTableProxy, metaclass=ModelMetaclass)
```
Main base class of ormar Model.
Inherits from pydantic BaseModel and has all mixins combined in ModelTableProxy.
Constructed with ModelMetaclass which in turn also inherits pydantic metaclass.
Abstracts away all internals and helper functions, so the final Model class has only
the logic concerned with database connection and data persistence.
<a name="models.newbasemodel.NewBaseModel.__init__"></a>
#### \_\_init\_\_
```python
| __init__(*args: Any, **kwargs: Any) -> None
```
Initializer that creates a new ormar Model that is also pydantic Model at the
same time.
Passed keyword arguments can be only field names and their corresponding values
as those will be passed to pydantic validation that will complain if extra
params are passed.
If relations are defined each relation is expanded and children models are also
initialized and validated. Relation from both sides is registered so you can
access related models from both sides.
Json fields are automatically loaded/dumped if needed.
Models marked as abstract=True in internal Meta class cannot be initialized.
Also accepts a special __pk_only__ flag that indicates that the Model is constructed
only with the primary key value (so no other fields; it's a child model on another
Model), which causes the validation to be skipped - that's the only case when the
validation can be skipped.
Also accepts a special __excluded__ parameter that contains a set of fields that
should be explicitly set to None, as otherwise pydantic will try to populate
them with their default values if a default is set.
**Raises**:
- `ModelError`: if abstract model is initialized or unknown field is passed
**Arguments**:
- `args (Any)`: ignored args
- `kwargs (Any)`: keyword arguments - all fields values and some special params
<a name="models.newbasemodel.NewBaseModel.__setattr__"></a>
#### \_\_setattr\_\_
```python
| __setattr__(name: str, value: Any) -> None
```
Overwrites setattr in object to allow for special behaviour of certain params.
Parameter "pk" is translated into actual primary key field name.
Relations are expanded (child model constructed if needed) and registered on
both ends of the relation. The related models are handled by RelationshipManager
exposed at _orm param.
Json fields are converted if needed.
Setting pk, foreign key value or any other field value sets Model save status
to False. Setting a reverse relation or many to many relation does not as it
does not modify the state of the model (but related model or through model).
To short circuit all checks and expansions, the set of attribute names present
on each model is gathered into _quick_access_fields, which is checked first;
if the field is in this set, object's setattr is called directly.
**Arguments**:
- `name (str)`: name of the attribute to set
- `value (Any)`: value of the attribute to set
**Returns**:
`(None)`: None
<a name="models.newbasemodel.NewBaseModel.__getattribute__"></a>
#### \_\_getattribute\_\_
```python
| __getattribute__(item: str) -> Any
```
Because ormar needs to overwrite attribute access (instead of pydantic),
as well as return related models and not the value stored on the model,
__getattribute__ needs to be used, not __getattr__.
It's used to access all attributes, so it can be a big overhead; that's why a
number of short circuits is used.
To short circuit all checks and expansions, the set of attribute names present
on each model is gathered into _quick_access_fields, which is checked first;
if the field is in this set, object's getattribute is called directly.
To avoid recursion, object's getattribute is used to actually get the attribute
value from the model after the checks.
Even the function calls are constructed with object's functions.
Parameter "pk" is translated into actual primary key field name.
Relations are returned so the actual related model is returned and not current
model's field. The related models are handled by RelationshipManager exposed
at _orm param.
Json fields are converted if needed.
**Arguments**:
- `item (str)`: name of the attribute to retrieve
**Returns**:
`(Any)`: value of the attribute
<a name="models.newbasemodel.NewBaseModel._extract_related_model_instead_of_field"></a>
#### \_extract\_related\_model\_instead\_of\_field
```python
| _extract_related_model_instead_of_field(item: str) -> Optional[Union["T", Sequence["T"]]]
```
Retrieves the related model/models from RelationshipManager.
**Arguments**:
- `item (str)`: name of the relation
**Returns**:
`(Optional[Union[Model, List[Model]]])`: related model, list of related models or None
<a name="models.newbasemodel.NewBaseModel.__eq__"></a>
#### \_\_eq\_\_
```python
| __eq__(other: object) -> bool
```
Compares another model to this model when == is called.
**Arguments**:
- `other (object)`: other model to compare
**Returns**:
`(bool)`: result of comparison
<a name="models.newbasemodel.NewBaseModel.__same__"></a>
#### \_\_same\_\_
```python
| __same__(other: "NewBaseModel") -> bool
```
Used by __eq__; compares the other model to this model.
Compares:
* _orm_ids,
* primary key values if set,
* dictionaries of own fields (excluding relations)
**Arguments**:
- `other (NewBaseModel)`: model to compare to
**Returns**:
`(bool)`: result of comparison
<a name="models.newbasemodel.NewBaseModel.get_name"></a>
#### get\_name
```python
| @classmethod
| get_name(cls, lower: bool = True) -> str
```
Returns name of the Model class, by default lowercase.
**Arguments**:
- `lower (bool)`: flag if name should be set to lowercase
**Returns**:
`(str)`: name of the model
<a name="models.newbasemodel.NewBaseModel.pk_column"></a>
#### pk\_column
```python
| @property
| pk_column() -> sqlalchemy.Column
```
Retrieves the primary key sqlalchemy column from the model's Meta.table.
Each model has to have a primary key.
Only one primary key column is allowed.
**Returns**:
`(sqlalchemy.Column)`: primary key sqlalchemy column
<a name="models.newbasemodel.NewBaseModel.saved"></a>
#### saved
```python
| @property
| saved() -> bool
```
Saved status of the model. Changed by setattr and by loading from the db.
<a name="models.newbasemodel.NewBaseModel.signals"></a>
#### signals
```python
| @property
| signals() -> "SignalEmitter"
```
Exposes signals from the model's Meta.
<a name="models.newbasemodel.NewBaseModel.pk_type"></a>
#### pk\_type
```python
| @classmethod
| pk_type(cls) -> Any
```
Shortcut to the model's primary key field type.
<a name="models.newbasemodel.NewBaseModel.db_backend_name"></a>
#### db\_backend\_name
```python
| @classmethod
| db_backend_name(cls) -> str
```
Shortcut to the database dialect,
as some dialects require different treatment.
<a name="models.newbasemodel.NewBaseModel.remove"></a>
#### remove
```python
| remove(parent: "T", name: str) -> None
```
Removes the child from the relation with the given name in the RelationshipManager.
<a name="models.newbasemodel.NewBaseModel.set_save_status"></a>
#### set\_save\_status
```python
| set_save_status(status: bool) -> None
```
Sets value of the save status
<a name="models.newbasemodel.NewBaseModel.get_properties"></a>
#### get\_properties
```python
| @classmethod
| get_properties(cls, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None]) -> Set[str]
```
Returns a set of names of functions/fields decorated with
the @property_field decorator.
They are added to the dictionary when dict() is called directly and therefore
are also present in fastapi responses.
**Arguments**:
- `include (Union[Set, Dict, None])`: fields to include
- `exclude (Union[Set, Dict, None])`: fields to exclude
**Returns**:
`(Set[str])`: set of property fields names
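A hedged sketch of where these property field names come from; the Person model is illustrative and assumes the decorator is exposed as ormar.property_field:
```python
# Illustrative sketch - Person is an assumption; database/metadata as in earlier sketches.
class Person(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    first_name: str = ormar.String(max_length=50)
    last_name: str = ormar.String(max_length=50)

    @ormar.property_field
    def full_name(self) -> str:
        return f"{self.first_name} {self.last_name}"


person = Person(first_name="Ada", last_name="Lovelace")
assert "full_name" in person.dict()   # property fields land in dict() output
```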
<a name="models.newbasemodel.NewBaseModel._get_related_not_excluded_fields"></a>
#### \_get\_related\_not\_excluded\_fields
```python
| _get_related_not_excluded_fields(include: Optional[Dict], exclude: Optional[Dict]) -> List
```
Returns related field names, applying the include and exclude sets to them.
**Arguments**:
- `include (Union[Set, Dict, None])`: fields to include
- `exclude (Union[Set, Dict, None])`: fields to exclude
**Returns**:
`(List)`: list of related fields that are not excluded
<a name="models.newbasemodel.NewBaseModel._extract_nested_models_from_list"></a>
#### \_extract\_nested\_models\_from\_list
```python
| @staticmethod
| _extract_nested_models_from_list(models: MutableSequence, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None]) -> List
```
Converts list of models into list of dictionaries.
**Arguments**:
- `models (List)`: List of models
- `include (Union[Set, Dict, None])`: fields to include
- `exclude (Union[Set, Dict, None])`: fields to exclude
**Returns**:
`(List[Dict])`: list of models converted to dictionaries
<a name="models.newbasemodel.NewBaseModel._skip_ellipsis"></a>
#### \_skip\_ellipsis
```python
| _skip_ellipsis(items: Union[Set, Dict, None], key: str) -> Union[Set, Dict, None]
```
Helper to traverse the include/exclude dictionaries.
In dict() an Ellipsis should be skipped as it indicates that all fields are required
and is not an actual set/dict with field names.
**Arguments**:
- `items (Union[Set, Dict, None])`: current include/exclude value
- `key (str)`: key for nested relations to check
**Returns**:
`(Union[Set, Dict, None])`: nested value of the items
<a name="models.newbasemodel.NewBaseModel._extract_nested_models"></a>
#### \_extract\_nested\_models
```python
| _extract_nested_models(nested: bool, dict_instance: Dict, include: Optional[Dict], exclude: Optional[Dict]) -> Dict
```
Traverses nested models and converts them into dictionaries.
Calls itself recursively if needed.
**Arguments**:
- `nested (bool)`: flag if current instance is nested
- `dict_instance (Dict)`: current instance dict
- `include (Optional[Dict])`: fields to include
- `exclude (Optional[Dict])`: fields to exclude
**Returns**:
`(Dict)`: current model dict with child models converted to dictionaries
<a name="models.newbasemodel.NewBaseModel.dict"></a>
#### dict
```python
| dict(*, include: Union[Set, Dict] = None, exclude: Union[Set, Dict] = None, by_alias: bool = False, skip_defaults: bool = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, nested: bool = False) -> "DictStrAny"
```
Generate a dictionary representation of the model,
optionally specifying which fields to include or exclude.
Nested models are also parsed to dictionaries.
Additionally, fields decorated with @property_field are added.
**Arguments**:
- `include (Union[Set, Dict, None])`: fields to include
- `exclude (Union[Set, Dict, None])`: fields to exclude
- `by_alias (bool)`: flag to get values by alias - passed to pydantic
- `skip_defaults (bool)`: flag to not set values - passed to pydantic
- `exclude_unset (bool)`: flag to exclude not set values - passed to pydantic
- `exclude_defaults (bool)`: flag to exclude default values - passed to pydantic
- `exclude_none (bool)`: flag to exclude None values - passed to pydantic
- `nested (bool)`: flag if the current model is nested
**Returns**:
`(DictStrAny)`: dictionary of the model's fields and values
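A hedged usage sketch with the illustrative Company model; include/exclude accept sets, or nested dicts for related models:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company = Company(id=1, name="ACME", founded=1947)
payload = company.dict(exclude={"founded"})
assert "founded" not in payload and payload["name"] == "ACME"
```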
<a name="models.newbasemodel.NewBaseModel.update_from_dict"></a>
#### update\_from\_dict
```python
| update_from_dict(value_dict: Dict) -> "NewBaseModel"
```
Updates self with values of fields passed in the dictionary.
**Arguments**:
- `value_dict (Dict)`: dictionary of fields names and values
**Returns**:
`(NewBaseModel)`: self
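A hedged usage sketch; the values are only set on the instance, nothing is persisted until update()/upsert() is called:
```python
# Illustrative sketch - Company is the assumed model from the earlier example.
company.update_from_dict({"name": "ACME Corp.", "founded": 1950})
# persist the changes afterwards, e.g. with `await company.update()`
```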
<a name="models.newbasemodel.NewBaseModel._convert_json"></a>
#### \_convert\_json
```python
| _convert_json(column_name: str, value: Any, op: str) -> Union[str, Dict]
```
Converts value to/from json if needed (for Json columns).
**Arguments**:
- `column_name (str)`: name of the field
- `value (Any)`: value of the field
- `op (str)`: operator on json
**Returns**:
`(Any)`: converted value if needed, else original value
<a name="models.newbasemodel.NewBaseModel._is_conversion_to_json_needed"></a>
#### \_is\_conversion\_to\_json\_needed
```python
| _is_conversion_to_json_needed(column_name: str) -> bool
```
Checks if given column name is related to JSON field.
**Arguments**:
- `column_name (str)`: name of the field
**Returns**:
`(bool)`: result of the check
<a name="models.newbasemodel.NewBaseModel._extract_own_model_fields"></a>
#### \_extract\_own\_model\_fields
```python
| _extract_own_model_fields() -> Dict
```
Returns a dictionary with field names and values for fields that are not
relation fields (ForeignKey, ManyToMany etc.).
**Returns**:
`(Dict)`: dictionary of fields names and values.
<a name="models.newbasemodel.NewBaseModel._extract_model_db_fields"></a>
#### \_extract\_model\_db\_fields
```python
| _extract_model_db_fields() -> Dict
```
Returns a dictionary with field names and values for fields that are stored in
current model's table.
That includes own non-relational fields and foreign key fields.
**Returns**:
`(Dict)`: dictionary of fields names and values.
<a name="models.newbasemodel.NewBaseModel.get_relation_model_id"></a>
#### get\_relation\_model\_id
```python
| get_relation_model_id(target_field: Type["BaseField"]) -> Optional[int]
```
Returns an id of the relation side model to use in prefetch query.
**Arguments**:
- `target_field (Type["BaseField"])`: field with relation definition
**Returns**:
`(Optional[int])`: value of pk if set

View File

@ -0,0 +1,161 @@
<a name="queryset.clause"></a>
# queryset.clause
<a name="queryset.clause.QueryClause"></a>
## QueryClause Objects
```python
class QueryClause()
```
Constructs where clauses from strings passed as arguments
<a name="queryset.clause.QueryClause.filter"></a>
#### filter
```python
| filter(**kwargs: Any) -> Tuple[List[sqlalchemy.sql.expression.TextClause], List[str]]
```
Main external access point that processes the clauses into sqlalchemy text
clauses and updates the select_related list with implicit related tables
mentioned in filter strings but not included in select_related.
**Arguments**:
- `kwargs (Any)`: key, value pair with column names and values
**Returns**:
`(Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]])`: Tuple with list of where clauses and updated select_related list
<a name="queryset.clause.QueryClause._populate_filter_clauses"></a>
#### \_populate\_filter\_clauses
```python
| _populate_filter_clauses(**kwargs: Any) -> Tuple[List[sqlalchemy.sql.expression.TextClause], List[str]]
```
Iterates all clauses and extracts used operator and field from related
models if needed. Based on the chain of related names the target table
is determined and the final clause is escaped if needed and compiled.
**Arguments**:
- `kwargs (Any)`: key, value pair with column names and values
**Returns**:
`(Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]])`: Tuple with list of where clauses and updated select_related list
<a name="queryset.clause.QueryClause._process_column_clause_for_operator_and_value"></a>
#### \_process\_column\_clause\_for\_operator\_and\_value
```python
| _process_column_clause_for_operator_and_value(value: Any, op: str, column: sqlalchemy.Column, table: sqlalchemy.Table, table_prefix: str) -> sqlalchemy.sql.expression.TextClause
```
Escapes characters if it's required.
If the value is an ormar Model, substitutes it with its pk value.
Compiles the clause.
**Arguments**:
- `value (Any)`: value of the filter
- `op (str)`: filter operator
- `column (sqlalchemy.sql.schema.Column)`: column on which filter should be applied
- `table (sqlalchemy.sql.schema.Table)`: table on which filter should be applied
- `table_prefix (str)`: prefix from AliasManager
**Returns**:
`(sqlalchemy.sql.elements.TextClause)`: compiled and escaped clause
<a name="queryset.clause.QueryClause._determine_filter_target_table"></a>
#### \_determine\_filter\_target\_table
```python
| _determine_filter_target_table(related_parts: List[str], select_related: List[str]) -> Tuple[List[str], str, Type["Model"]]
```
Adds related strings to the select_related list, as otherwise the clause would fail
because the required columns would not be present. That means that the select_related
list is filled with missing values present in filters.
Walks the relation to retrieve the actual model on which the clause should be
constructed, and extracts the alias based on the last relation leading to the target model.
**Arguments**:
- `related_parts (List[str])`: list of split parts of related string
- `select_related (List[str])`: list of related models
**Returns**:
`(Tuple[List[str], str, Type[Model]])`: list of related models, table_prefix, final model class
<a name="queryset.clause.QueryClause._compile_clause"></a>
#### \_compile\_clause
```python
| _compile_clause(clause: sqlalchemy.sql.expression.BinaryExpression, column: sqlalchemy.Column, table: sqlalchemy.Table, table_prefix: str, modifiers: Dict) -> sqlalchemy.sql.expression.TextClause
```
Compiles the clause to str using the appropriate database dialect, replaces column
names with aliased names and converts it back to a TextClause.
**Arguments**:
- `clause (sqlalchemy.sql.elements.BinaryExpression)`: original not compiled clause
- `column (sqlalchemy.sql.schema.Column)`: column on which filter should be applied
- `table (sqlalchemy.sql.schema.Table)`: table on which filter should be applied
- `table_prefix (str)`: prefix from AliasManager
- `modifiers (Dict[str, NoneType])`: sqlalchemy modifiers - used only to escape chars here
**Returns**:
`(sqlalchemy.sql.elements.TextClause)`: compiled and escaped clause
<a name="queryset.clause.QueryClause._escape_characters_in_clause"></a>
#### \_escape\_characters\_in\_clause
```python
| @staticmethod
| _escape_characters_in_clause(op: str, value: Any) -> Tuple[Any, bool]
```
Escapes the special characters ["%", "_"] if needed.
Adds `%` for `like` queries.
**Raises**:
- `QueryDefinitionError`: if contains or icontains is used with
ormar model instance
**Arguments**:
- `op (str)`: operator used in query
- `value (Any)`: value of the filter
**Returns**:
`(Tuple[Any, bool])`: escaped value and flag if escaping is needed
<a name="queryset.clause.QueryClause._extract_operator_field_and_related"></a>
#### \_extract\_operator\_field\_and\_related
```python
| @staticmethod
| _extract_operator_field_and_related(parts: List[str]) -> Tuple[str, str, Optional[List]]
```
Splits filter query key and extracts required parts.
**Arguments**:
- `parts (List[str])`: split filter query key
**Returns**:
`(Tuple[str, str, Optional[List]])`: operator, field_name, list of related parts
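A simplified, hedged sketch of the documented split; it mimics the described behaviour with an illustrative operator set and is not the actual implementation:
```python
# Simplified sketch only - FILTER_OPERATORS here is an illustrative subset.
FILTER_OPERATORS = {"exact", "iexact", "contains", "icontains", "gt", "gte", "lt", "lte", "in"}


def split_filter_key(key: str):
    parts = key.split("__")
    op = parts[-1] if parts[-1] in FILTER_OPERATORS else "exact"
    if op != "exact":
        parts = parts[:-1]
    return op, parts[-1], parts[:-1] or None


assert split_filter_key("company__name__icontains") == ("icontains", "name", ["company"])
assert split_filter_key("name") == ("exact", "name", None)
```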

View File

@ -0,0 +1,29 @@
<a name="queryset.filter_query"></a>
# queryset.filter\_query
<a name="queryset.filter_query.FilterQuery"></a>
## FilterQuery Objects
```python
class FilterQuery()
```
Modifies the select query with given list of where/filter clauses.
<a name="queryset.filter_query.FilterQuery.apply"></a>
#### apply
```python
| apply(expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select
```
Applies all filter clauses if set.
**Arguments**:
- `expr (sqlalchemy.sql.selectable.Select)`: query to modify
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: modified query

243
docs/api/query-set/join.md Normal file
View File

@ -0,0 +1,243 @@
<a name="queryset.join"></a>
# queryset.join
<a name="queryset.join.JoinParameters"></a>
## JoinParameters Objects
```python
class JoinParameters(NamedTuple)
```
Named tuple that holds set of parameters passed during join construction.
<a name="queryset.join.SqlJoin"></a>
## SqlJoin Objects
```python
class SqlJoin()
```
<a name="queryset.join.SqlJoin.alias_manager"></a>
#### alias\_manager
```python
| @staticmethod
| alias_manager(model_cls: Type["Model"]) -> AliasManager
```
Shortcut for the ormar model's AliasManager stored on Meta.
**Arguments**:
- `model_cls (Type[Model])`: ormar Model class
**Returns**:
`(AliasManager)`: alias manager from model's Meta
<a name="queryset.join.SqlJoin.on_clause"></a>
#### on\_clause
```python
| @staticmethod
| on_clause(previous_alias: str, alias: str, from_clause: str, to_clause: str) -> text
```
Receives aliases and names of both ends of the join and combines them
into one text clause used in joins.
**Arguments**:
- `previous_alias (str)`: alias of previous table
- `alias (str)`: alias of current table
- `from_clause (str)`: from table name
- `to_clause (str)`: to table name
**Returns**:
`(sqlalchemy.text)`: clause combining all strings
<a name="queryset.join.SqlJoin.update_inclusions"></a>
#### update\_inclusions
```python
| @staticmethod
| update_inclusions(model_cls: Type["Model"], fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]], nested_name: str) -> Tuple[Optional[Union[Dict, Set]], Optional[Union[Dict, Set]]]
```
Extracts nested fields and exclude_fields if applicable.
**Arguments**:
- `model_cls (Type["Model"])`: ormar model class
- `fields (Optional[Union[Set, Dict]])`: fields to include
- `exclude_fields (Optional[Union[Set, Dict]])`: fields to exclude
- `nested_name (str)`: name of the nested field
**Returns**:
`(Tuple[Optional[Union[Dict, Set]], Optional[Union[Dict, Set]]])`: updated exclude and include fields from nested objects
<a name="queryset.join.SqlJoin.build_join"></a>
#### build\_join
```python
| build_join(item: str, join_parameters: JoinParameters) -> Tuple[List, sqlalchemy.sql.select, List, OrderedDict]
```
Main external access point for building a join.
Splits the join definition, updates fields and exclude_fields if needed,
handles switching to through models for m2m relations, returns updated lists of
used_aliases and sort_orders.
**Arguments**:
- `item (str)`: string with join definition
- `join_parameters (JoinParameters)`: parameters from previous/ current join
**Returns**:
`(Tuple[List[str], Join, List[TextClause], collections.OrderedDict])`: list of used aliases, select from, list of aliased columns, sort orders
<a name="queryset.join.SqlJoin._build_join_parameters"></a>
#### \_build\_join\_parameters
```python
| _build_join_parameters(part: str, join_params: JoinParameters, fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]], is_multi: bool = False) -> JoinParameters
```
Updates used_aliases to not join multiple times to the same table.
Updates join parameters with new values.
**Arguments**:
- `part (str)`: part of the join str definition
- `join_params (JoinParameters)`: parameters from previous/ current join
- `fields (Optional[Union[Set, Dict]])`: fields to include
- `exclude_fields (Optional[Union[Set, Dict]])`: fields to exclude
- `is_multi (bool)`: flag if the relation is m2m
**Returns**:
`(ormar.queryset.join.JoinParameters)`: updated join parameters
<a name="queryset.join.SqlJoin._process_join"></a>
#### \_process\_join
```python
| _process_join(join_params: JoinParameters, is_multi: bool, model_cls: Type["Model"], part: str, alias: str, fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]]) -> None
```
Resolves to and from column names and table names.
Produces the on_clause.
Performs the actual join, updating the select_from parameter.
Adds aliases of required columns to the list of columns to include in the query.
Updates the used aliases list directly.
Processes order_by clauses for non-m2m relations.
**Arguments**:
- `join_params (JoinParameters)`: parameters from previous/ current join
- `is_multi (bool)`: flag if it's m2m relation
- `model_cls (ormar.models.metaclass.ModelMetaclass)`: ormar model class
- `part (str)`: name of the field used in join
- `alias (str)`: alias of the current join
- `fields (Optional[Union[Set, Dict]])`: fields to include
- `exclude_fields (Optional[Union[Set, Dict]])`: fields to exclude
<a name="queryset.join.SqlJoin._switch_many_to_many_order_columns"></a>
#### \_switch\_many\_to\_many\_order\_columns
```python
| _switch_many_to_many_order_columns(part: str, new_part: str) -> None
```
Substitutes the name of the relation with actual model name in m2m order bys.
**Arguments**:
- `part (str)`: name of the field with relation
- `new_part (str)`: name of the target model
<a name="queryset.join.SqlJoin._check_if_condition_apply"></a>
#### \_check\_if\_condition\_apply
```python
| @staticmethod
| _check_if_condition_apply(condition: List, part: str) -> bool
```
Checks filter conditions to find if they apply to current join.
**Arguments**:
- `condition (List[str])`: list of parts of condition split by '__'
- `part (str)`: name of the current relation join.
**Returns**:
`(bool)`: result of the check
<a name="queryset.join.SqlJoin.set_aliased_order_by"></a>
#### set\_aliased\_order\_by
```python
| set_aliased_order_by(condition: List[str], alias: str, to_table: str, model_cls: Type["Model"]) -> None
```
Substitutes hyphens ('-') with descending order.
Constructs the actual sqlalchemy text clause using the aliased table and column name.
**Arguments**:
- `condition (List[str])`: list of parts of a current condition split by '__'
- `alias (str)`: alias of the table in current join
- `to_table (sqlalchemy.sql.elements.quoted_name)`: target table
- `model_cls (ormar.models.metaclass.ModelMetaclass)`: ormar model class
<a name="queryset.join.SqlJoin.get_order_bys"></a>
#### get\_order\_bys
```python
| get_order_bys(alias: str, to_table: str, pkname_alias: str, part: str, model_cls: Type["Model"]) -> None
```
Triggers construction of order bys if they are given.
Otherwise, by default, each table is sorted by its primary key column ascending.
**Arguments**:
- `alias (str)`: alias of current table in join
- `to_table (sqlalchemy.sql.elements.quoted_name)`: target table
- `pkname_alias (str)`: alias of the primary key column
- `part (str)`: name of the current relation join
- `model_cls (Type[Model])`: ormar model class
<a name="queryset.join.SqlJoin.get_to_and_from_keys"></a>
#### get\_to\_and\_from\_keys
```python
| @staticmethod
| get_to_and_from_keys(join_params: JoinParameters, is_multi: bool, model_cls: Type["Model"], part: str) -> Tuple[str, str]
```
Based on the relation type, name of the relation and previous models and parts
stored in JoinParameters it resolves the current to and from keys, which are
different for ManyToMany relation, ForeignKey and reverse part of relations.
**Arguments**:
- `join_params (JoinParameters)`: parameters from previous/ current join
- `is_multi (bool)`: flag if the relation is of m2m type
- `model_cls (Type[Model])`: ormar model class
- `part (str)`: name of the current relation join
**Returns**:
`(Tuple[str, str])`: to key and from key


@ -0,0 +1,29 @@
<a name="queryset.limit_query"></a>
# queryset.limit\_query
<a name="queryset.limit_query.LimitQuery"></a>
## LimitQuery Objects
```python
class LimitQuery()
```
Modifies the select query with a limit clause.
<a name="queryset.limit_query.LimitQuery.apply"></a>
#### apply
```python
| apply(expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select
```
Applies the limit clause.
**Arguments**:
- `expr (sqlalchemy.sql.selectable.Select)`: query to modify
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: modified query


@ -0,0 +1,29 @@
<a name="queryset.offset_query"></a>
# queryset.offset\_query
<a name="queryset.offset_query.OffsetQuery"></a>
## OffsetQuery Objects
```python
class OffsetQuery()
```
Modifies the select query with an offset clause if set.
<a name="queryset.offset_query.OffsetQuery.apply"></a>
#### apply
```python
| apply(expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select
```
Applies the offset clause.
**Arguments**:
- `expr (sqlalchemy.sql.selectable.Select)`: query to modify
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: modified query


@ -0,0 +1,29 @@
<a name="queryset.order_query"></a>
# queryset.order\_query
<a name="queryset.order_query.OrderQuery"></a>
## OrderQuery Objects
```python
class OrderQuery()
```
Modifies the select query with given list of order_by clauses.
<a name="queryset.order_query.OrderQuery.apply"></a>
#### apply
```python
| apply(expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select
```
Applies all order_by clauses if set.
**Arguments**:
- `expr (sqlalchemy.sql.selectable.Select)`: query to modify
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: modified query


@ -0,0 +1,345 @@
<a name="queryset.prefetch_query"></a>
# queryset.prefetch\_query
<a name="queryset.prefetch_query.add_relation_field_to_fields"></a>
#### add\_relation\_field\_to\_fields
```python
add_relation_field_to_fields(fields: Union[Set[Any], Dict[Any, Any], None], related_field_name: str) -> Union[Set[Any], Dict[Any, Any], None]
```
Adds the related field to the fields to include, as otherwise it would be skipped.
The related field is added only if fields are already populated.
Empty fields imply all fields.
**Arguments**:
- `fields (Union[Set[Any], Dict[Any, Any], None])`: fields to include
- `related_field_name (str)`: name of the field with relation
**Returns**:
`(Union[Set[Any], Dict[Any, Any], None])`: updated fields dict
<a name="queryset.prefetch_query.sort_models"></a>
#### sort\_models
```python
sort_models(models: List["Model"], orders_by: Dict) -> List["Model"]
```
Since the prefetch query gets all related models by ids, the sorting needs to happen in
python. Models are already sorted by id by default, so they are resorted here only if
order_by parameters were set.
**Arguments**:
- `models (List[Model])`: list of models already fetched from db
- `orders_by (Dict[str, str])`: order by dictionary
**Returns**:
`(List[Model])`: sorted list of models
<a name="queryset.prefetch_query.set_children_on_model"></a>
#### set\_children\_on\_model
```python
set_children_on_model(model: "Model", related: str, children: Dict, model_id: int, models: Dict, orders_by: Dict) -> None
```
Extracts ids of child models by the given relation id key value.
Based on those ids the actual child model instances are fetched from
already fetched data.
If needed, the child models are resorted according to the passed orders_by dict.
The relation is also registered, as each child is set as the value of the parent's related field.
**Arguments**:
- `model (Model)`: parent model instance
- `related (str)`: name of the related field
- `children (Dict[int, set])`: dictionary of children ids/ related field value
- `model_id (int)`: id of the model on which children should be set
- `models (Dict)`: dictionary of child models instances
- `orders_by (Dict)`: order_by dictionary
<a name="queryset.prefetch_query.PrefetchQuery"></a>
## PrefetchQuery Objects
```python
class PrefetchQuery()
```
Query used to fetch related models in subsequent queries.
Each model is fetched only once, by the name of the relation.
That means that for each prefetch_related entry a separate query is issued against the database.
<a name="queryset.prefetch_query.PrefetchQuery.prefetch_related"></a>
#### prefetch\_related
```python
| async prefetch_related(models: Sequence["Model"], rows: List) -> Sequence["Model"]
```
Main entry point for prefetch_query.
Receives a list of already initialized parent models with all children from
select_related already populated. Also receives the list of raw sql result rows,
as it's quicker to extract ids that way than by calling each model.
Returns the list with related models already prefetched and set.
**Arguments**:
- `models (List[Model])`: list of already instantiated models from main query
- `rows (List[sqlalchemy.engine.result.RowProxy])`: row sql result of the main query before the prefetch
**Returns**:
`(List[Model])`: list of models with children prefetched
<a name="queryset.prefetch_query.PrefetchQuery._extract_ids_from_raw_data"></a>
#### \_extract\_ids\_from\_raw\_data
```python
| _extract_ids_from_raw_data(parent_model: Type["Model"], column_name: str) -> Set
```
Iterates over raw rows and extracts id values of relation columns by using
prefixed column name.
**Arguments**:
- `parent_model (Type[Model])`: ormar model class
- `column_name (str)`: name of the relation column which is a key column
**Returns**:
`(set)`: set of ids of related model that should be extracted
<a name="queryset.prefetch_query.PrefetchQuery._extract_ids_from_preloaded_models"></a>
#### \_extract\_ids\_from\_preloaded\_models
```python
| _extract_ids_from_preloaded_models(parent_model: Type["Model"], column_name: str) -> Set
```
Extracts relation ids from already populated models if they were included
in the original query before.
**Arguments**:
- `parent_model (Type["Model"])`: model from which related ids should be extracted
- `column_name (str)`: name of the relation column which is a key column
**Returns**:
`(set)`: set of ids of related model that should be extracted
<a name="queryset.prefetch_query.PrefetchQuery._extract_required_ids"></a>
#### \_extract\_required\_ids
```python
| _extract_required_ids(parent_model: Type["Model"], reverse: bool, related: str) -> Set
```
Delegates extraction of the fields to either get ids from raw sql response
or from already populated models.
**Arguments**:
- `parent_model (Type["Model"])`: model from which related ids should be extracted
- `reverse (bool)`: flag if the relation is reverse
- `related (str)`: name of the field with relation
**Returns**:
`(set)`: set of ids of related model that should be extracted
<a name="queryset.prefetch_query.PrefetchQuery._get_filter_for_prefetch"></a>
#### \_get\_filter\_for\_prefetch
```python
| _get_filter_for_prefetch(parent_model: Type["Model"], target_model: Type["Model"], reverse: bool, related: str) -> List
```
Populates the where clause with a condition to return only models within the
set of extracted ids.
If there are no ids for the relation an empty list is returned.
**Arguments**:
- `parent_model (Type["Model"])`: model from which related ids should be extracted
- `target_model (Type["Model"])`: model to which relation leads to
- `reverse (bool)`: flag if the relation is reverse
- `related (str)`: name of the field with relation
**Returns**:
`(List[sqlalchemy.sql.elements.TextClause])`: list of filter clauses with ids of related models
<a name="queryset.prefetch_query.PrefetchQuery._populate_nested_related"></a>
#### \_populate\_nested\_related
```python
| _populate_nested_related(model: "Model", prefetch_dict: Dict, orders_by: Dict) -> "Model"
```
Populates all children of the parent model that are
included in the prefetch query.
**Arguments**:
- `model (Model)`: ormar model instance
- `prefetch_dict (Dict)`: dictionary of models to prefetch
- `orders_by (Dict)`: dictionary of order bys
**Returns**:
`(Model)`: model with children populated
<a name="queryset.prefetch_query.PrefetchQuery._prefetch_related_models"></a>
#### \_prefetch\_related\_models
```python
| async _prefetch_related_models(models: Sequence["Model"], rows: List) -> Sequence["Model"]
```
Main method of the query.
Translates the select and prefetch lists into dictionaries to avoid querying the
same related models multiple times.
Keeps the list of already extracted models.
Extracts the related models from the database and later populates all children
on each of the parent models from the list.
**Arguments**:
- `models (List[Model])`: list of parent models from main query
- `rows (List[sqlalchemy.engine.result.RowProxy])`: raw response from sql query
**Returns**:
`(List[Model])`: list of models with prefetch children populated
<a name="queryset.prefetch_query.PrefetchQuery._extract_related_models"></a>
#### \_extract\_related\_models
```python
| async _extract_related_models(related: str, target_model: Type["Model"], prefetch_dict: Dict, select_dict: Dict, fields: Union[Set[Any], Dict[Any, Any], None], exclude_fields: Union[Set[Any], Dict[Any, Any], None], orders_by: Dict) -> None
```
Constructs queries with required ids and extracts data with fields that should
be included/excluded.
Runs the queries against the database and populates dictionaries with ids and
with the actual extracted children models.
Calls itself recursively to extract deeper nested relations of the related model.
**Arguments**:
- `related (str)`: name of the relation
- `target_model (Type[Model])`: model to which relation leads to
- `prefetch_dict (Dict)`: prefetch related list converted into dictionary
- `select_dict (Dict)`: select related list converted into dictionary
- `fields (Union[Set[Any], Dict[Any, Any], None])`: fields to include
- `exclude_fields (Union[Set[Any], Dict[Any, Any], None])`: fields to exclude
- `orders_by (Dict)`: dictionary of order bys clauses
**Returns**:
`(None)`: None
<a name="queryset.prefetch_query.PrefetchQuery._run_prefetch_query"></a>
#### \_run\_prefetch\_query
```python
| async _run_prefetch_query(target_field: Type["BaseField"], fields: Union[Set[Any], Dict[Any, Any], None], exclude_fields: Union[Set[Any], Dict[Any, Any], None], filter_clauses: List) -> Tuple[str, List]
```
Actually runs the queries against the database and populates the raw response
for the given related model.
Returns the table prefix, as it's later needed to initialize the children
models.
**Arguments**:
- `target_field (Type["BaseField"])`: ormar field with relation definition
- `fields (Union[Set[Any], Dict[Any, Any], None])`: fields to include
- `exclude_fields (Union[Set[Any], Dict[Any, Any], None])`: fields to exclude
- `filter_clauses (List[sqlalchemy.sql.elements.TextClause])`: list of clauses, actually one clause with ids of relation
**Returns**:
`(Tuple[str, List])`: table prefix and raw rows from sql response
<a name="queryset.prefetch_query.PrefetchQuery._get_select_related_if_apply"></a>
#### \_get\_select\_related\_if\_apply
```python
| @staticmethod
| _get_select_related_if_apply(related: str, select_dict: Dict) -> Dict
```
Extracts the nested part of the select_related dictionary, covering models nested
deeper on the related model and already loaded in the select related query.
**Arguments**:
- `related (str)`: name of the relation
- `select_dict (Dict)`: dictionary of select related models in main query
**Returns**:
`(Dict)`: dictionary with nested part of select related
<a name="queryset.prefetch_query.PrefetchQuery._update_already_loaded_rows"></a>
#### \_update\_already\_loaded\_rows
```python
| _update_already_loaded_rows(target_field: Type["BaseField"], prefetch_dict: Dict, orders_by: Dict) -> None
```
Updates models that are already loaded, usually children of children.
**Arguments**:
- `target_field (Type["BaseField"])`: ormar field with relation definition
- `prefetch_dict (Dict)`: dictionaries of related models to prefetch
- `orders_by (Dict)`: dictionary of order by clauses by model
<a name="queryset.prefetch_query.PrefetchQuery._populate_rows"></a>
#### \_populate\_rows
```python
| _populate_rows(rows: List, target_field: Type["BaseField"], parent_model: Type["Model"], table_prefix: str, fields: Union[Set[Any], Dict[Any, Any], None], exclude_fields: Union[Set[Any], Dict[Any, Any], None], prefetch_dict: Dict, orders_by: Dict) -> None
```
Instantiates children models extracted from given relation.
Populates them with their own nested children if they are included in prefetch
query.
Sets the initialized models and ids of them under corresponding keys in
already_extracted dictionary. Later those instances will be fetched by ids
and set on the parent model after sorting if needed.
**Arguments**:
- `rows (List[sqlalchemy.engine.result.RowProxy])`: raw sql response from the prefetch query
- `target_field (Type["BaseField"])`: field with relation definition from parent model
- `parent_model (Type[Model])`: model with relation definition
- `table_prefix (str)`: prefix of the target table from current relation
- `fields (Union[Set[Any], Dict[Any, Any], None])`: fields to include
- `exclude_fields (Union[Set[Any], Dict[Any, Any], None])`: fields to exclude
- `prefetch_dict (Dict)`: dictionaries of related models to prefetch
- `orders_by (Dict)`: dictionary of order by clauses by model


@ -0,0 +1,657 @@
<a name="queryset.queryset"></a>
# queryset.queryset
<a name="queryset.queryset.QuerySet"></a>
## QuerySet Objects
```python
class QuerySet()
```
Main class to perform database queries, exposed on each model as objects attribute.
<a name="queryset.queryset.QuerySet.model_meta"></a>
#### model\_meta
```python
| @property
| model_meta() -> "ModelMeta"
```
Shortcut to model class Meta set on QuerySet model.
**Returns**:
`(model Meta class)`: Meta class of the model
<a name="queryset.queryset.QuerySet.model"></a>
#### model
```python
| @property
| model() -> Type["Model"]
```
Shortcut to model class set on QuerySet.
**Returns**:
`(Type[Model])`: model class
<a name="queryset.queryset.QuerySet._prefetch_related_models"></a>
#### \_prefetch\_related\_models
```python
| async _prefetch_related_models(models: Sequence[Optional["Model"]], rows: List) -> Sequence[Optional["Model"]]
```
Performs the prefetch query for selected model names.
**Arguments**:
- `models (List[Model])`: list of already parsed main Models from main query
- `rows (List[sqlalchemy.engine.result.RowProxy])`: database rows from main query
**Returns**:
`(List[Model])`: list of models with prefetch models populated
<a name="queryset.queryset.QuerySet._process_query_result_rows"></a>
#### \_process\_query\_result\_rows
```python
| _process_query_result_rows(rows: List) -> Sequence[Optional["Model"]]
```
Processes database rows and initializes an ormar Model from each of the rows.
**Arguments**:
- `rows (List[sqlalchemy.engine.result.RowProxy])`: list of database rows from query result
**Returns**:
`(List[Model])`: list of models
<a name="queryset.queryset.QuerySet.check_single_result_rows_count"></a>
#### check\_single\_result\_rows\_count
```python
| @staticmethod
| check_single_result_rows_count(rows: Sequence[Optional["Model"]]) -> None
```
Verifies if the result has one and only one row.
**Arguments**:
- `rows (List[Model])`: one element list of Models
<a name="queryset.queryset.QuerySet.database"></a>
#### database
```python
| @property
| database() -> databases.Database
```
Shortcut to models database from Meta class.
**Returns**:
`(databases.Database)`: database
<a name="queryset.queryset.QuerySet.table"></a>
#### table
```python
| @property
| table() -> sqlalchemy.Table
```
Shortcut to models table from Meta class.
**Returns**:
`(sqlalchemy.Table)`: database table
<a name="queryset.queryset.QuerySet.build_select_expression"></a>
#### build\_select\_expression
```python
| build_select_expression(limit: int = None, offset: int = None, order_bys: List = None) -> sqlalchemy.sql.select
```
Constructs the actual database query used in the QuerySet.
If any of the params is not passed, the QuerySet's own value is used.
**Arguments**:
- `limit (int)`: number to limit the query
- `offset (int)`: number to offset by
- `order_bys (List)`: list of order-by fields names
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: built sqlalchemy select expression
<a name="queryset.queryset.QuerySet.filter"></a>
#### filter
```python
| filter(_exclude: bool = False, **kwargs: Any) -> "QuerySet"
```
Allows you to filter by any `Model` attribute/field
as well as to fetch instances, with a filter across an FK relationship.
You can use special filter suffix to change the filter operands:
* exact - like `album__name__exact='Malibu'` (exact match)
* iexact - like `album__name__iexact='malibu'` (exact match case insensitive)
* contains - like `album__name__contains='Mal'` (sql like)
* icontains - like `album__name__icontains='mal'` (sql like case insensitive)
* in - like `album__name__in=['Malibu', 'Barclay']` (sql in)
* gt - like `position__gt=3` (sql >)
* gte - like `position__gte=3` (sql >=)
* lt - like `position__lt=3` (sql <)
* lte - like `position__lte=3` (sql <=)
* startswith - like `album__name__startswith='Mal'` (exact start match)
* istartswith - like `album__name__istartswith='mal'` (case insensitive)
* endswith - like `album__name__endswith='ibu'` (exact end match)
* iendswith - like `album__name__iendswith='IBU'` (case insensitive)
**Arguments**:
- `_exclude (bool)`: flag whether it should exclude or filter
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(QuerySet)`: filtered QuerySet
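The suffixes are easiest to read next to a concrete call. Below is a hedged, minimal sketch: the `Author`/`Book` models, the sqlite connection string and the `related_name="books"` are illustrative assumptions and not part of this reference; only the `filter()` calls mirror the documented behaviour. The same hypothetical models are reused by the sketches further down.
```python
from typing import Optional

import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///example.db")  # assumed connection string
metadata = sqlalchemy.MetaData()


class Author(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    bio: Optional[str] = ormar.String(max_length=500, nullable=True)


class Book(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=200)
    year: Optional[int] = ormar.Integer(nullable=True)
    author: Optional[Author] = ormar.ForeignKey(Author, related_name="books")


async def filter_examples() -> None:
    # suffix on an own column
    old_books = await Book.objects.filter(year__lt=1950).all()
    # case-insensitive match across the FK relation, chained with '__'
    by_author = await Book.objects.filter(author__name__iexact="some author").all()
    # multiple kwargs are joined with AND
    narrowed = await Book.objects.filter(title__startswith="The", year__gte=2000).all()
```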
<a name="queryset.queryset.QuerySet.exclude"></a>
#### exclude
```python
| exclude(**kwargs: Any) -> "QuerySet"
```
Works exactly the same as filter and all modifiers (suffixes) are the same,
but returns a *not* condition.
So if you use `filter(name='John')`, which is `where name = 'John'` in SQL,
then `exclude(name='John')` is equivalent to `where name <> 'John'`.
Note that all conditions are joined, so if you pass multiple values it
becomes a union of conditions.
`exclude(name='John', age__gte=35)` will become
`where not (name='John' and age>=35)`.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(QuerySet)`: filtered QuerySet
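A hedged sketch of the negation, reusing the hypothetical `Book` model from the filter sketch above:
```python
async def exclude_examples() -> None:
    # everything except books published in or after 1950
    not_recent = await Book.objects.exclude(year__gte=1950).all()
    # both conditions end up wrapped in a single NOT (...)
    filtered = await Book.objects.exclude(title__startswith="The", year__gte=2000).all()
```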
<a name="queryset.queryset.QuerySet.select_related"></a>
#### select\_related
```python
| select_related(related: Union[List, str]) -> "QuerySet"
```
Allows you to fetch related models in the same query.
**With `select_related` always only one query is run against the database**,
meaning that one (sometimes complicated) join is generated and later nested
models are processed in python.
To fetch a related model use `ForeignKey` names.
To chain related `Models` relations use double underscores between names.
**Arguments**:
- `related (Union[List, str])`: list of relation field names, can be linked by '__' to nest
**Returns**:
`(QuerySet)`: QuerySet
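A hedged sketch with the hypothetical models from the filter example; a single query with a join is issued and each `Book` comes back with its `author` already populated:
```python
async def select_related_example() -> None:
    books = await Book.objects.select_related("author").all()
    for book in books:
        # author is already populated, no extra query here
        print(book.title, book.author.name if book.author else None)
```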
<a name="queryset.queryset.QuerySet.prefetch_related"></a>
#### prefetch\_related
```python
| prefetch_related(related: Union[List, str]) -> "QuerySet"
```
Allows you to prefetch related models during a query - but in contrast to
`select_related`, each subsequent model is fetched in a separate database query.
**With `prefetch_related` always one query per Model is run against the
database**, meaning that you will have multiple queries executed one
after another.
To fetch a related model use `ForeignKey` names.
To chain related `Models` relations use double underscores between names.
**Arguments**:
- `related (Union[List, str])`: list of relation field names, can be linked by '__' to nest
**Returns**:
`(QuerySet)`: QuerySet
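A hedged sketch, assuming the reverse relation is exposed as `books` (the `related_name` chosen in the filter sketch); one query fetches the authors and a second query fetches all of their books:
```python
async def prefetch_related_example() -> None:
    authors = await Author.objects.prefetch_related("books").all()
    for author in authors:
        print(author.name, len(author.books))
```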
<a name="queryset.queryset.QuerySet.fields"></a>
#### fields
```python
| fields(columns: Union[List, str, Set, Dict]) -> "QuerySet"
```
With `fields()` you can select a subset of model columns to limit the data load.
Note that `fields()` and `exclude_fields()` work both for main models
(on normal queries like `get`, `all` etc.)
as well as for `select_related` and `prefetch_related`
models (with nested notation).
You can select specified fields by passing a `str, List[str], Set[str] or
dict` with a nested definition.
To include related models use the notation
`{related_name}__{column}[__{optional_next} etc.]`.
`fields()` can be called several times, building up the columns to select.
If you include related models in a `select_related()` call but do not specify
columns for those models in fields - a list of all fields for
those nested models is implied.
Mandatory fields cannot be excluded as that will raise a `ValidationError`;
to exclude a field it has to be nullable.
The pk column cannot be excluded - it's always auto added even if
not explicitly included.
You can also pass fields to include as a dictionary or a set.
To mark a field as included in a dictionary use its name as the key
and ellipsis as the value.
To traverse nested models use nested dictionaries.
To include fields at the last level, a set can be used instead of a nested dictionary.
To include a whole nested model specify the model's related field name and ellipsis.
**Arguments**:
- `columns (Union[List, str, Set, Dict])`: columns to include
**Returns**:
`(QuerySet)`: QuerySet
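A hedged sketch of the different masks, again with the hypothetical `Book`/`Author` models; `year` is nullable there, so it can be left out, while the pk is added back automatically:
```python
async def fields_examples() -> None:
    # a plain list limits the Book columns that are loaded
    slim_books = await Book.objects.fields(["title"]).all()
    # a dict/set mask also limits nested Author columns loaded via select_related
    nested = (
        await Book.objects.select_related("author")
        .fields({"title": ..., "author": {"name"}})
        .all()
    )
```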
<a name="queryset.queryset.QuerySet.exclude_fields"></a>
#### exclude\_fields
```python
| exclude_fields(columns: Union[List, str, Set, Dict]) -> "QuerySet"
```
With `exclude_fields()` you can select a subset of model columns that will
be excluded to limit the data load.
It's the opposite of the `fields()` method, so check the documentation above
to see what options are available.
In particular, check above how you can also pass nested dictionaries
and sets as a mask to exclude fields from the whole hierarchy.
Note that `fields()` and `exclude_fields()` work both for main models
(on normal queries like `get`, `all` etc.)
as well as for `select_related` and `prefetch_related` models
(with nested notation).
Mandatory fields cannot be excluded as that will raise a `ValidationError`;
to exclude a field it has to be nullable.
The pk column cannot be excluded - it's always auto added even
if explicitly excluded.
**Arguments**:
- `columns (Union[List, str, Set, Dict])`: columns to exclude
**Returns**:
`(QuerySet)`: QuerySet
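The inverse mask, as a hedged sketch on the same hypothetical models (`year` and `bio` are nullable there, so excluding them is allowed):
```python
async def exclude_fields_examples() -> None:
    # drop a single nullable column from the loaded data
    books = await Book.objects.exclude_fields("year").all()
    # nested masks work for related models loaded via select_related too
    books = (
        await Book.objects.select_related("author")
        .exclude_fields({"author": {"bio"}})
        .all()
    )
```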
<a name="queryset.queryset.QuerySet.order_by"></a>
#### order\_by
```python
| order_by(columns: Union[List, str]) -> "QuerySet"
```
With `order_by()` you can order the results from the database based on your
choice of fields.
You can provide a string with a field name or a list of strings with field names.
Ordering in sql will be applied in the order of the names you provide in order_by.
By default, if you do not provide ordering, `ormar` explicitly orders by
all primary keys.
If you are sorting by nested models in a way that leaves the result rows
unsorted by the main model, `ormar` will combine those children rows into
one main model.
The main model will never be duplicated in the result.
To order by a main model field just provide the field name.
To sort on nested models separate field names with dunder '__'.
You can sort this way across all relation types -> `ForeignKey`,
reverse virtual FK and `ManyToMany` fields.
To sort in descending order provide a hyphen in front of the field name.
**Arguments**:
- `columns (Union[List, str])`: columns by which models should be sorted
**Returns**:
`(QuerySet)`: QuerySet
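A hedged sketch of both plain and relation-spanning sorts on the hypothetical models:
```python
async def order_by_examples() -> None:
    # newest first, ties broken by title ascending
    books = await Book.objects.order_by(["-year", "title"]).all()
    # sort across a relation with '__' (reverse FK here, assuming related_name="books")
    authors = await Author.objects.select_related("books").order_by("books__title").all()
```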
<a name="queryset.queryset.QuerySet.exists"></a>
#### exists
```python
| async exists() -> bool
```
Returns a bool value to confirm if there are rows matching the given criteria
(applied with `filter` and `exclude` if set).
**Returns**:
`(bool)`: result of the check
<a name="queryset.queryset.QuerySet.count"></a>
#### count
```python
| async count() -> int
```
Returns number of rows matching the given criteria
(applied with `filter` and `exclude` if set before).
**Returns**:
`(int)`: number of rows
<a name="queryset.queryset.QuerySet.update"></a>
#### update
```python
| async update(each: bool = False, **kwargs: Any) -> int
```
Updates the model table after applying the filters from kwargs.
You have to either pass a filter to narrow down the query or explicitly pass the
`each=True` flag to affect the whole table.
**Arguments**:
- `each (bool)`: flag if whole table should be affected if no filter is passed
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(int)`: number of updated rows
<a name="queryset.queryset.QuerySet.delete"></a>
#### delete
```python
| async delete(each: bool = False, **kwargs: Any) -> int
```
Deletes from the model table after applying the filters from kwargs.
You have to either pass a filter to narrow down the query or explicitly pass the
`each=True` flag to affect the whole table.
**Arguments**:
- `each (bool)`: flag if whole table should be affected if no filter is passed
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(int)`: number of deleted rows
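A hedged sketch covering both `update()` and `delete()` on the hypothetical `Book` model; without a narrowing filter the `each=True` flag has to be passed explicitly:
```python
async def update_and_delete_examples() -> None:
    # narrow down with filter(), then pass the new values as kwargs
    updated = await Book.objects.filter(year__lt=0).update(year=None)
    # kwargs passed to delete() act as a filter
    deleted = await Book.objects.delete(title__startswith="Draft")
    # touching the whole table requires the explicit flag
    reset = await Book.objects.update(each=True, year=None)
    print(updated, deleted, reset)
```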
<a name="queryset.queryset.QuerySet.limit"></a>
#### limit
```python
| limit(limit_count: int, limit_raw_sql: bool = None) -> "QuerySet"
```
You can limit the results to a desired number of parent models.
To limit the actual number of database query rows instead of the number of main
models, use the `limit_raw_sql` parameter flag and set it to `True`.
**Arguments**:
- `limit_raw_sql (bool)`: flag if raw sql should be limited
- `limit_count (int)`: number of models to limit
**Returns**:
`(QuerySet)`: QuerySet
<a name="queryset.queryset.QuerySet.offset"></a>
#### offset
```python
| offset(offset: int, limit_raw_sql: bool = None) -> "QuerySet"
```
You can also offset the results by a desired number of main models.
To offset the actual number of database query rows instead of the number of main
models, use the `limit_raw_sql` parameter flag and set it to `True`.
**Arguments**:
- `limit_raw_sql (bool)`: flag if raw sql should be offset
- `offset (int)`: numbers of models to offset
**Returns**:
`(QuerySet)`: QuerySet
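A hedged pagination sketch; `limit_raw_sql` switches both methods from counting main models to counting raw result rows, which matters when joins multiply rows:
```python
async def pagination_example() -> None:
    # third page of ten Books, counted in main models
    page = await Book.objects.order_by("title").offset(20).limit(10).all()
    # count raw sql rows instead, e.g. when select_related multiplies rows
    raw_page = (
        await Book.objects.select_related("author")
        .limit(10, limit_raw_sql=True)
        .all()
    )
```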
<a name="queryset.queryset.QuerySet.first"></a>
#### first
```python
| async first(**kwargs: Any) -> "Model"
```
Gets the first row from the db ordered by primary key column ascending.
**Raises**:
- `NoMatch`: if no rows are returned
- `MultipleMatches`: if more than 1 row is returned.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: returned model
<a name="queryset.queryset.QuerySet.get"></a>
#### get
```python
| async get(**kwargs: Any) -> "Model"
```
Gets the first row from the db meeting the criteria set by kwargs.
If no criteria are set it will return the last row in the db sorted by pk.
Passing criteria is actually calling the filter(**kwargs) method described below.
**Raises**:
- `NoMatch`: if no rows are returned
- `MultipleMatches`: if more than 1 row is returned.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: returned model
<a name="queryset.queryset.QuerySet.get_or_create"></a>
#### get\_or\_create
```python
| async get_or_create(**kwargs: Any) -> "Model"
```
Combination of create and get methods.
Tries to get a row meeting the criteria from kwargs
and if a `NoMatch` exception is raised
it creates a new one with the given kwargs.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: returned or created Model
<a name="queryset.queryset.QuerySet.update_or_create"></a>
#### update\_or\_create
```python
| async update_or_create(**kwargs: Any) -> "Model"
```
Updates the model, or in case there is no match in database creates a new one.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: updated or created model
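A hedged sketch of the two upsert-style helpers on the hypothetical models; the assumption here is that `update_or_create()` without a primary key in kwargs simply creates a new row:
```python
async def get_or_create_examples() -> None:
    # returns the existing Author or creates one with the given kwargs
    author = await Author.objects.get_or_create(name="Some Author")
    # no pk passed, so this call is assumed to create a new Book
    book = await Book.objects.update_or_create(title="Fresh Title", year=2021, author=author)
```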
<a name="queryset.queryset.QuerySet.all"></a>
#### all
```python
| async all(**kwargs: Any) -> Sequence[Optional["Model"]]
```
Returns all rows from the database for the given model and set filter options.
Passing kwargs is a shortcut and equals calling `filter(**kwargs).all()`.
If there are no rows meeting the criteria an empty list is returned.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(List[Model])`: list of returned models
<a name="queryset.queryset.QuerySet.create"></a>
#### create
```python
| async create(**kwargs: Any) -> "Model"
```
Creates the model instance, saves it in the database and returns the updated model
(with pk populated if not passed and autoincrement is set).
The allowed kwargs are `Model` fields names and proper value types.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: created model
<a name="queryset.queryset.QuerySet.bulk_create"></a>
#### bulk\_create
```python
| async bulk_create(objects: List["Model"]) -> None
```
Performs a bulk create in one database session to speed up the process.
Allows you to create multiple objects at once.
A valid list of `Model` objects needs to be passed.
Bulk operations do not send signals.
**Arguments**:
- `objects (List[Model])`: list of ormar models already initialized and ready to save.
<a name="queryset.queryset.QuerySet.bulk_update"></a>
#### bulk\_update
```python
| async bulk_update(objects: List["Model"], columns: List[str] = None) -> None
```
Performs a bulk update in one database session to speed up the process.
Allows you to update multiple instances at once.
All `Models` passed need to have the primary key column populated.
You can also select which fields to update by passing a `columns` list
of string field names.
Bulk operations do not send signals.
**Arguments**:
- `objects (List[Model])`: list of ormar models
- `columns (List[str])`: list of columns to update
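A hedged sketch of both bulk helpers; the re-fetch in the middle is only there to make sure every instance has its primary key populated before `bulk_update()`:
```python
async def bulk_examples() -> None:
    await Book.objects.bulk_create(
        [Book(title=f"Volume {i}", year=2000 + i) for i in range(3)]
    )
    books = await Book.objects.all()
    for book in books:
        book.year = (book.year or 0) + 1
    # update only the changed column
    await Book.objects.bulk_update(books, columns=["year"])
```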

150
docs/api/query-set/query.md Normal file

@ -0,0 +1,150 @@
<a name="queryset.query"></a>
# queryset.query
<a name="queryset.query.Query"></a>
## Query Objects
```python
class Query()
```
<a name="queryset.query.Query._init_sorted_orders"></a>
#### \_init\_sorted\_orders
```python
| _init_sorted_orders() -> None
```
Initializes an empty order_by dict to be populated later during the query call.
<a name="queryset.query.Query.prefixed_pk_name"></a>
#### prefixed\_pk\_name
```python
| @property
| prefixed_pk_name() -> str
```
Shortcut for extracting the alias-prefixed primary key column name from the main
model.
**Returns**:
`(str)`: alias of the pk column prefixed with the table name.
<a name="queryset.query.Query.alias"></a>
#### alias
```python
| alias(name: str) -> str
```
Shortcut for extracting a column alias from the given main model.
**Arguments**:
- `name (str)`: name of column
**Returns**:
`(str)`: alias of given column name
<a name="queryset.query.Query.apply_order_bys_for_primary_model"></a>
#### apply\_order\_bys\_for\_primary\_model
```python
| apply_order_bys_for_primary_model() -> None
```
Applies order_by clauses on the main model when it's used as a subquery.
That way the subquery with limit and offset only on the main model has proper
sorting applied and the correct models are fetched.
<a name="queryset.query.Query._pagination_query_required"></a>
#### \_pagination\_query\_required
```python
| _pagination_query_required() -> bool
```
Checks if limit or offset are set, the flag limit_sql_raw is not set
and the query has select_related applied. Otherwise we can limit/offset normally
at the end of the whole query.
**Returns**:
`(bool)`: result of the check
<a name="queryset.query.Query.build_select_expression"></a>
#### build\_select\_expression
```python
| build_select_expression() -> Tuple[sqlalchemy.sql.select, List[str]]
```
Main entry point from outside (after proper initialization).
Extracts the list of columns to fetch,
constructs all required joins for select related,
then applies all conditional and sort clauses.
Returns a ready to run query with all joins and clauses.
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: ready to run query with all joins and clauses.
<a name="queryset.query.Query._build_pagination_subquery"></a>
#### \_build\_pagination\_subquery
```python
| _build_pagination_subquery() -> sqlalchemy.sql.select
```
Builds a subquery in order to apply limit and offset on the main table only in a join
(otherwise you could get a partially constructed main model
if the number of children exceeds the applied limit and select_related is used).
Also used to change the behaviour of first() and get() without arguments.
Needed only if limit or offset are set, the flag limit_sql_raw is not set
and the query has select_related applied. Otherwise we can limit/offset normally
at the end of the whole query.
**Returns**:
`(sqlalchemy.sql.select)`: constructed subquery on main table with limit, offset and order applied
<a name="queryset.query.Query._apply_expression_modifiers"></a>
#### \_apply\_expression\_modifiers
```python
| _apply_expression_modifiers(expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select
```
Receives the select query (might be join) and applies:
* Filter clauses
* Exclude filter clauses
* Limit clauses
* Offset clauses
* Order by clauses
Returns complete ready to run query.
**Arguments**:
- `expr (sqlalchemy.sql.selectable.Select)`: select expression before clauses
**Returns**:
`(sqlalchemy.sql.selectable.Select)`: expression with all present clauses applied
<a name="queryset.query.Query._reset_query_parameters"></a>
#### \_reset\_query\_parameters
```python
| _reset_query_parameters() -> None
```
Although it should be created each time before the call we reset the key params
anyway.

152
docs/api/query-set/utils.md Normal file

@ -0,0 +1,152 @@
<a name="queryset.utils"></a>
# queryset.utils
<a name="queryset.utils.check_node_not_dict_or_not_last_node"></a>
#### check\_node\_not\_dict\_or\_not\_last\_node
```python
check_node_not_dict_or_not_last_node(part: str, parts: List, current_level: Any) -> bool
```
Checks if given name is not present in the current level of the structure.
Checks if given name is not the last name in the split list of parts.
Checks if the given name in current level is not a dictionary.
All those checks verify if there is a need for deeper traversal.
**Arguments**:
- `part (str)`: given name (a single part of the split relation string)
- `parts (List[str])`: all parts of the split relation string
- `current_level (Any)`: current level of the traversed structure
**Returns**:
`(bool)`: result of the check
<a name="queryset.utils.translate_list_to_dict"></a>
#### translate\_list\_to\_dict
```python
translate_list_to_dict(list_to_trans: Union[List, Set], is_order: bool = False) -> Dict
```
Splits the list of strings by '__' and converts them to a dictionary with nested
models grouped by parent model. That way each model appears only once in the whole
dictionary and children are grouped under the parent name.
The default required key is Ellipsis, like in pydantic.
**Arguments**:
- `list_to_trans (set)`: input list
- `is_order (bool)`: flag whether the change affects order_by clauses, as they require a special
default value with sort order.
**Returns**:
`(Dict)`: converted to dictionary input list
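A minimal sketch of the idea (not ormar's implementation): group `'__'`-separated names under their parent keys, with `Ellipsis` as the terminal marker:
```python
from typing import Dict, List, Set, Union


def nest_by_dunder(names: Union[List[str], Set[str]]) -> Dict:
    # sketch only: nest "parent__child__field" style names into a dict tree
    result: Dict = {}
    for name in names:
        *parents, leaf = name.split("__")
        level = result
        for part in parents:
            if not isinstance(level.get(part), dict):
                level[part] = {}
            level = level[part]
        level.setdefault(leaf, ...)
    return result


# e.g. ["author", "author__books__title"] -> {"author": {"books": {"title": Ellipsis}}}
```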
<a name="queryset.utils.convert_set_to_required_dict"></a>
#### convert\_set\_to\_required\_dict
```python
convert_set_to_required_dict(set_to_convert: set) -> Dict
```
Converts set to dictionary of required keys.
Required key is Ellipsis.
**Arguments**:
- `set_to_convert (set)`: set to convert to dict
**Returns**:
`(Dict)`: set converted to dict of ellipsis
<a name="queryset.utils.update"></a>
#### update
```python
update(current_dict: Any, updating_dict: Any) -> Dict
```
Updates one dict with another, with regard for nested keys.
That way nested sets are unionised, dicts are updated and
only other values are overwritten.
**Arguments**:
- `current_dict (Dict[str, ellipsis])`: dict to update
- `updating_dict (Dict)`: dict with values to update
**Returns**:
`(Dict)`: combination of both dicts
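A minimal sketch of that merge rule (again, not the library code): sets are unioned, dicts are merged recursively, anything else is overwritten:
```python
from typing import Dict


def merge_nested(current: Dict, updating: Dict) -> Dict:
    # sketch only: nested-aware dict update
    for key, value in updating.items():
        old = current.get(key)
        if isinstance(old, set) and isinstance(value, set):
            current[key] = old | value
        elif isinstance(old, dict) and isinstance(value, dict):
            current[key] = merge_nested(old, value)
        else:
            current[key] = value
    return current


merged = merge_nested({"author": {"name"}}, {"author": {"bio"}, "title": ...})
# -> {"author": {"name", "bio"}, "title": Ellipsis}
```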
<a name="queryset.utils.update_dict_from_list"></a>
#### update\_dict\_from\_list
```python
update_dict_from_list(curr_dict: Dict, list_to_update: Union[List, Set]) -> Dict
```
Converts the list into dictionary and later performs special update, where
nested keys that are sets or dicts are combined and not overwritten.
**Arguments**:
- `curr_dict (Dict)`: dict to update
- `list_to_update (List[str])`: list with values to update the dict
**Returns**:
`(Dict)`: updated dict
<a name="queryset.utils.extract_nested_models"></a>
#### extract\_nested\_models
```python
extract_nested_models(model: "Model", model_type: Type["Model"], select_dict: Dict, extracted: Dict) -> None
```
Iterates over model relations and extracts all nested models from select_dict,
putting them in the corresponding list under the relation name in the extracted dict keys.
Basically flattens all relations into a dictionary of all related models; it can be
used on several models to extract all of their children into a dictionary of lists
with children models.
Also goes into nested relations if needed (as specified in select_dict).
**Arguments**:
- `model (Model)`: parent Model
- `model_type (Type[Model])`: parent model class
- `select_dict (Dict)`: dictionary of related models from select_related
- `extracted (Dict)`: dictionary with already extracted models
<a name="queryset.utils.extract_models_to_dict_of_lists"></a>
#### extract\_models\_to\_dict\_of\_lists
```python
extract_models_to_dict_of_lists(model_type: Type["Model"], models: Sequence["Model"], select_dict: Dict, extracted: Dict = None) -> Dict
```
Receives a list of models and extracts all of the children and their children
into a dictionary of lists with children models, flattening the structure to one dict
with all children models under their relation keys.
**Arguments**:
- `model_type (Type[Model])`: parent model class
- `models (List[Model])`: list of models from which related models should be extracted.
- `select_dict (Dict)`: dictionary of related models from select_related
- `extracted (Dict)`: dictionary with already extracted models
**Returns**:
`(Dict)`: dictionary of lists of related models


@ -0,0 +1,120 @@
<a name="relations.alias_manager"></a>
# relations.alias\_manager
<a name="relations.alias_manager.get_table_alias"></a>
#### get\_table\_alias
```python
get_table_alias() -> str
```
Creates a random string that is used to alias tables in joins.
It's necessary that each relation has its own alias because you can link
to the same target tables from multiple fields on one model, as well as from
multiple different models in one join.
**Returns**:
`(str)`: randomly generated alias
<a name="relations.alias_manager.AliasManager"></a>
## AliasManager Objects
```python
class AliasManager()
```
Keeps all aliases of relations between different tables.
One global instance is shared between all models.
<a name="relations.alias_manager.AliasManager.prefixed_columns"></a>
#### prefixed\_columns
```python
| @staticmethod
| prefixed_columns(alias: str, table: sqlalchemy.Table, fields: List = None) -> List[text]
```
Creates a list of aliased sqlalchemy text clauses from
a string alias and a sqlalchemy.Table.
An optional list of fields to include can be passed to extract only those columns.
The list has to contain the sqlalchemy names of columns (ormar aliases), not the ormar ones.
**Arguments**:
- `alias (str)`: alias of given table
- `table (sqlalchemy.Table)`: table from which fields should be aliased
- `fields (Optional[List[str]])`: fields to include
**Returns**:
`(List[text])`: list of sqlalchemy text clauses with "column name as aliased name"
<a name="relations.alias_manager.AliasManager.prefixed_table_name"></a>
#### prefixed\_table\_name
```python
| @staticmethod
| prefixed_table_name(alias: str, name: str) -> text
```
Creates a text clause with the table name and its aliased name.
**Arguments**:
- `alias (str)`: alias of given table
- `name (str)`: table name
**Returns**:
`(sqlalchemy text clause)`: sqlalchemy text clause as "table_name aliased_name"
<a name="relations.alias_manager.AliasManager.add_relation_type"></a>
#### add\_relation\_type
```python
| add_relation_type(source_model: Type["Model"], relation_name: str, reverse_name: str = None, is_multi: bool = False) -> None
```
Registers the relations defined in ormar models.
Given the relation it registers also the reverse side of this relation.
Used by both ForeignKey and ManyToMany relations.
Each relation is registered as Model name and relation name.
Each alias registered has to be unique.
Aliases are used to construct joins to assure proper links between tables.
That way you can link to the same target tables from multiple fields
on one model as well as from multiple different models in one join.
**Arguments**:
- `source_model (source Model)`: model with relation defined
- `relation_name (str)`: name of the relation to define
- `reverse_name (Optional[str])`: name of related_name of the given relation for m2m relations
- `is_multi (bool)`: flag if relation being registered is a through m2m model
**Returns**:
`(None)`: none
<a name="relations.alias_manager.AliasManager.resolve_relation_alias"></a>
#### resolve\_relation\_alias
```python
| resolve_relation_alias(from_model: Type["Model"], relation_name: str) -> str
```
Given model and relation name returns the alias for this relation.
**Arguments**:
- `from_model (source Model)`: model with relation defined
- `relation_name (str)`: name of the relation field
**Returns**:
`(str)`: alias of the relation


@ -0,0 +1,588 @@
<a name="relations.querysetproxy"></a>
# relations.querysetproxy
<a name="relations.querysetproxy.QuerysetProxy"></a>
## QuerysetProxy Objects
```python
class QuerysetProxy(ormar.QuerySetProtocol)
```
Exposes QuerySet methods on relations, but also handles creating and removing
of through Models for m2m relations.
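A hedged sketch of how this surfaces in practice, assuming hypothetical `Post` and `Category` models linked with `categories = ormar.ManyToMany(Category)`; the proxy methods are reached directly through the relation attribute:
```python
async def m2m_proxy_example() -> None:
    # assumes hypothetical Post and Category models, with Post.categories = ormar.ManyToMany(Category)
    post = await Post.objects.create(title="Hello")
    # creates the Category and the through-model row in one go
    await post.categories.create(name="News")
    # delegated to QuerySet, but scoped to this post's categories
    news_like = await post.categories.filter(name__startswith="N").all()
    # removes the through-model rows for this relation
    await post.categories.clear()
```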
<a name="relations.querysetproxy.QuerysetProxy.queryset"></a>
#### queryset
```python
| @property
| queryset() -> "QuerySet"
```
Returns the queryset if it's set, raises AttributeError otherwise.
**Returns**:
`(QuerySet)`: QuerySet
<a name="relations.querysetproxy.QuerysetProxy.queryset"></a>
#### queryset
```python
| @queryset.setter
| queryset(value: "QuerySet") -> None
```
Sets the queryset. Initialized in RelationProxy.
**Arguments**:
- `value (QuerySet)`: QuerySet
<a name="relations.querysetproxy.QuerysetProxy._assign_child_to_parent"></a>
#### \_assign\_child\_to\_parent
```python
| _assign_child_to_parent(child: Optional["T"]) -> None
```
Registers the child in the parent's RelationManager.
**Arguments**:
- `child (Model)`: child to register on parent side.
<a name="relations.querysetproxy.QuerysetProxy._register_related"></a>
#### \_register\_related
```python
| _register_related(child: Union["T", Sequence[Optional["T"]]]) -> None
```
Registers a child or children in the parent's RelationManager.
**Arguments**:
- `child (Union[Model,List[Model]])`: child or list of children models to register.
<a name="relations.querysetproxy.QuerysetProxy._clean_items_on_load"></a>
#### \_clean\_items\_on\_load
```python
| _clean_items_on_load() -> None
```
Cleans the current list of the related models.
<a name="relations.querysetproxy.QuerysetProxy.create_through_instance"></a>
#### create\_through\_instance
```python
| async create_through_instance(child: "T") -> None
```
Creates a through model instance in the database for m2m relations.
**Arguments**:
- `child (Model)`: child model instance
<a name="relations.querysetproxy.QuerysetProxy.delete_through_instance"></a>
#### delete\_through\_instance
```python
| async delete_through_instance(child: "T") -> None
```
Removes through model instance from the database for m2m relations.
**Arguments**:
- `child (Model)`: child model instance
<a name="relations.querysetproxy.QuerysetProxy.exists"></a>
#### exists
```python
| async exists() -> bool
```
Returns a bool value to confirm if there are rows matching the given criteria
(applied with `filter` and `exclude` if set).
Actual call delegated to QuerySet.
**Returns**:
`(bool)`: result of the check
<a name="relations.querysetproxy.QuerysetProxy.count"></a>
#### count
```python
| async count() -> int
```
Returns number of rows matching the given criteria
(applied with `filter` and `exclude` if set before).
Actual call delegated to QuerySet.
**Returns**:
`(int)`: number of rows
<a name="relations.querysetproxy.QuerysetProxy.clear"></a>
#### clear
```python
| async clear(keep_reversed: bool = True) -> int
```
Removes all related models from given relation.
Removes all through models for m2m relation.
For reverse FK relations keep_reversed flag marks if the reversed models
should be kept or deleted from the database too (False means that models
will be deleted, and not only removed from relation).
**Arguments**:
- `keep_reversed (bool)`: flag if reverse models in reverse FK should be deleted
or not, keep_reversed=False deletes them from database.
**Returns**:
`(int)`: number of deleted models
<a name="relations.querysetproxy.QuerysetProxy.first"></a>
#### first
```python
| async first(**kwargs: Any) -> "Model"
```
Gets the first row from the db ordered by primary key column ascending.
Actual call delegated to QuerySet.
List of related models is cleared before the call.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: returned model
<a name="relations.querysetproxy.QuerysetProxy.get"></a>
#### get
```python
| async get(**kwargs: Any) -> "Model"
```
Gets the first row from the db meeting the criteria set by kwargs.
If no criteria are set it will return the last row in the db sorted by pk.
Passing criteria is actually calling the filter(**kwargs) method described below.
Actual call delegated to QuerySet.
List of related models is cleared before the call.
**Raises**:
- `NoMatch`: if no rows are returned
- `MultipleMatches`: if more than 1 row is returned.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: returned model
<a name="relations.querysetproxy.QuerysetProxy.all"></a>
#### all
```python
| async all(**kwargs: Any) -> Sequence[Optional["Model"]]
```
Returns all rows from the database for the given model and set filter options.
Passing kwargs is a shortcut and equals calling `filter(**kwargs).all()`.
If there are no rows meeting the criteria an empty list is returned.
Actual call delegated to QuerySet.
List of related models is cleared before the call.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(List[Model])`: list of returned models
<a name="relations.querysetproxy.QuerysetProxy.create"></a>
#### create
```python
| async create(**kwargs: Any) -> "Model"
```
Creates the model instance, saves it in the database and returns the updated model
(with pk populated if not passed and autoincrement is set).
The allowed kwargs are `Model` fields names and proper value types.
For m2m relation the through model is created automatically.
Actual call delegated to QuerySet.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: created model
<a name="relations.querysetproxy.QuerysetProxy.get_or_create"></a>
#### get\_or\_create
```python
| async get_or_create(**kwargs: Any) -> "Model"
```
Combination of create and get methods.
Tries to get a row meeting the criteria from kwargs
and if a `NoMatch` exception is raised
it creates a new one with the given kwargs.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: returned or created Model
<a name="relations.querysetproxy.QuerysetProxy.update_or_create"></a>
#### update\_or\_create
```python
| async update_or_create(**kwargs: Any) -> "Model"
```
Updates the model, or in case there is no match in database creates a new one.
Actual call delegated to QuerySet.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(Model)`: updated or created model
<a name="relations.querysetproxy.QuerysetProxy.filter"></a>
#### filter
```python
| filter(**kwargs: Any) -> "QuerysetProxy"
```
Allows you to filter by any `Model` attribute/field
as well as to fetch instances, with a filter across an FK relationship.
You can use special filter suffix to change the filter operands:
* exact - like `album__name__exact='Malibu'` (exact match)
* iexact - like `album__name__iexact='malibu'` (exact match case insensitive)
* contains - like `album__name__contains='Mal'` (sql like)
* icontains - like `album__name__icontains='mal'` (sql like case insensitive)
* in - like `album__name__in=['Malibu', 'Barclay']` (sql in)
* gt - like `position__gt=3` (sql >)
* gte - like `position__gte=3` (sql >=)
* lt - like `position__lt=3` (sql <)
* lte - like `position__lte=3` (sql <=)
* startswith - like `album__name__startswith='Mal'` (exact start match)
* istartswith - like `album__name__istartswith='mal'` (case insensitive)
* endswith - like `album__name__endswith='ibu'` (exact end match)
* iendswith - like `album__name__iendswith='IBU'` (case insensitive)
Actual call delegated to QuerySet.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(QuerysetProxy)`: filtered QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.exclude"></a>
#### exclude
```python
| exclude(**kwargs: Any) -> "QuerysetProxy"
```
Works exactly the same as filter and all modifiers (suffixes) are the same,
but returns a *not* condition.
So if you use `filter(name='John')`, which is `where name = 'John'` in SQL,
then `exclude(name='John')` is equivalent to `where name <> 'John'`.
Note that all conditions are joined, so if you pass multiple values it
becomes a union of conditions.
`exclude(name='John', age__gte=35)` will become
`where not (name='John' and age>=35)`.
Actual call delegated to QuerySet.
**Arguments**:
- `kwargs (Any)`: fields names and proper value types
**Returns**:
`(QuerysetProxy)`: filtered QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.select_related"></a>
#### select\_related
```python
| select_related(related: Union[List, str]) -> "QuerysetProxy"
```
Allows you to fetch related models in the same query.
**With `select_related` always only one query is run against the database**,
meaning that one (sometimes complicated) join is generated and later nested
models are processed in python.
To fetch a related model use `ForeignKey` names.
To chain related `Models` relations use double underscores between names.
Actual call delegated to QuerySet.
**Arguments**:
- `related (Union[List, str])`: list of relation field names, can be linked by '__' to nest
**Returns**:
`(QuerysetProxy)`: QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.prefetch_related"></a>
#### prefetch\_related
```python
| prefetch_related(related: Union[List, str]) -> "QuerysetProxy"
```
Allows you to prefetch related models during a query - but in contrast to
`select_related`, each subsequent model is fetched in a separate database query.
**With `prefetch_related` always one query per Model is run against the
database**, meaning that you will have multiple queries executed one
after another.
To fetch a related model use `ForeignKey` names.
To chain related `Models` relations use double underscores between names.
Actual call delegated to QuerySet.
**Arguments**:
- `related (Union[List, str])`: list of relation field names, can be linked by '__' to nest
**Returns**:
`(QuerysetProxy)`: QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.limit"></a>
#### limit
```python
| limit(limit_count: int) -> "QuerysetProxy"
```
You can limit the results to a desired number of parent models.
Actual call delegated to QuerySet.
**Arguments**:
- `limit_count (int)`: number of models to limit
**Returns**:
`(QuerysetProxy)`: QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.offset"></a>
#### offset
```python
| offset(offset: int) -> "QuerysetProxy"
```
You can also offset the results by a desired number of main models.
Actual call delegated to QuerySet.
**Arguments**:
- `offset (int)`: numbers of models to offset
**Returns**:
`(QuerysetProxy)`: QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.fields"></a>
#### fields
```python
| fields(columns: Union[List, str, Set, Dict]) -> "QuerysetProxy"
```
With `fields()` you can select a subset of model columns to limit the data load.
Note that `fields()` and `exclude_fields()` work both for main models
(on normal queries like `get`, `all` etc.)
as well as for `select_related` and `prefetch_related`
models (with nested notation).
You can select specified fields by passing a `str, List[str], Set[str] or
dict` with a nested definition.
To include related models use the notation
`{related_name}__{column}[__{optional_next} etc.]`.
`fields()` can be called several times, building up the columns to select.
If you include related models in a `select_related()` call but do not specify
columns for those models in fields - a list of all fields for
those nested models is implied.
Mandatory fields cannot be excluded as that will raise a `ValidationError`;
to exclude a field it has to be nullable.
The pk column cannot be excluded - it's always auto added even if
not explicitly included.
You can also pass fields to include as a dictionary or a set.
To mark a field as included in a dictionary use its name as the key
and ellipsis as the value.
To traverse nested models use nested dictionaries.
To include fields at the last level, a set can be used instead of a nested dictionary.
To include a whole nested model specify the model's related field name and ellipsis.
Actual call delegated to QuerySet.
**Arguments**:
- `columns (Union[List, str, Set, Dict])`: columns to include
**Returns**:
`(QuerysetProxy)`: QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.exclude_fields"></a>
#### exclude\_fields
```python
| exclude_fields(columns: Union[List, str, Set, Dict]) -> "QuerysetProxy"
```
With `exclude_fields()` you can select a subset of model columns that will
be excluded to limit the data load.
It's the opposite of the `fields()` method, so check the documentation above
to see what options are available.
In particular, check above how you can also pass nested dictionaries
and sets as a mask to exclude fields from the whole hierarchy.
Note that `fields()` and `exclude_fields()` work both for main models
(on normal queries like `get`, `all` etc.)
as well as for `select_related` and `prefetch_related` models
(with nested notation).
Mandatory fields cannot be excluded as that will raise a `ValidationError`;
to exclude a field it has to be nullable.
The pk column cannot be excluded - it's always auto added even
if explicitly excluded.
Actual call delegated to QuerySet.
**Arguments**:
- `columns (Union[List, str, Set, Dict])`: columns to exclude
**Returns**:
`(QuerysetProxy)`: QuerysetProxy
<a name="relations.querysetproxy.QuerysetProxy.order_by"></a>
#### order\_by
```python
| order_by(columns: Union[List, str]) -> "QuerysetProxy"
```
With `order_by()` you can order the results from the database based on your
choice of fields.
You can provide a string with a field name or a list of strings with field names.
Ordering in sql will be applied in the order of the names you provide in order_by.
By default, if you do not provide ordering, `ormar` explicitly orders by
all primary keys.
If you are sorting by nested models in a way that leaves the result rows
unsorted by the main model, `ormar` will combine those children rows into
one main model.
The main model will never be duplicated in the result.
To order by a main model field just provide the field name.
To sort on nested models separate field names with dunder '__'.
You can sort this way across all relation types -> `ForeignKey`,
reverse virtual FK and `ManyToMany` fields.
To sort in descending order provide a hyphen in front of the field name.
Actual call delegated to QuerySet.
**Arguments**:
- `columns (Union[List, str])`: columns by which models should be sorted
**Returns**:
`(QuerysetProxy)`: QuerysetProxy


@ -0,0 +1,152 @@
<a name="relations.relation_manager"></a>
# relations.relation\_manager
<a name="relations.relation_manager.RelationsManager"></a>
## RelationsManager Objects
```python
class RelationsManager()
```
Manages relations on a Model; each Model has its own instance.
<a name="relations.relation_manager.RelationsManager._get_relation_type"></a>
#### \_get\_relation\_type
```python
| _get_relation_type(field: Type[BaseField]) -> RelationType
```
Returns type of the relation declared on a field.
**Arguments**:
- `field (Type[BaseField])`: field with relation declaration
**Returns**:
`(RelationType)`: type of the relation defined on field
<a name="relations.relation_manager.RelationsManager._add_relation"></a>
#### \_add\_relation
```python
| _add_relation(field: Type[BaseField]) -> None
```
Registers relation in the manager.
Adds Relation instance under field.name.
**Arguments**:
- `field (Type[BaseField])`: field with relation declaration
<a name="relations.relation_manager.RelationsManager.__contains__"></a>
#### \_\_contains\_\_
```python
| __contains__(item: str) -> bool
```
Checks if relation with given name is already registered.
**Arguments**:
- `item (str)`: name of attribute
**Returns**:
`(bool)`: result of the check
<a name="relations.relation_manager.RelationsManager.get"></a>
#### get
```python
| get(name: str) -> Optional[Union["T", Sequence["T"]]]
```
Returns the related model/models if relation is set.
Actual call is delegated to Relation instance registered under relation name.
**Arguments**:
- `name (str)`: name of the relation
**Returns**:
`(Optional[Union[Model, List[Model]]])`: related model or list of related models if set
<a name="relations.relation_manager.RelationsManager._get"></a>
#### \_get
```python
| _get(name: str) -> Optional[Relation]
```
Returns the actual relation and not the related model(s).
**Arguments**:
- `name (str)`: name of the relation
**Returns**:
`(ormar.relations.relation.Relation)`: Relation instance
<a name="relations.relation_manager.RelationsManager.add"></a>
#### add
```python
| @staticmethod
| add(parent: "Model", child: "Model", child_name: str, virtual: bool, relation_name: str) -> None
```
Adds relation on both sides -> meaning on both child and parent models.
One side of the relation is always weakref proxy to avoid circular refs.
Based on the side from which the relation is added and the relation name, the actual names
of the parent and child relations are established. The related models are registered
on both ends.
**Arguments**:
- `parent (Model)`: parent model on which relation should be registered
- `child (Model)`: child model to register
- `child_name (str)`: potential child name used if related name is not set
- `virtual (bool)`:
- `relation_name (str)`: name of the relation
<a name="relations.relation_manager.RelationsManager.remove"></a>
#### remove
```python
| remove(name: str, child: Union["NewBaseModel", Type["NewBaseModel"]]) -> None
```
Removes given child from relation with given name.
Since you can have many relations between two models you need to pass a name
of relation from which you want to remove the child.
**Arguments**:
- `name (str)`: name of the relation
- `child (Union[Model, Type[Model]])`: child to remove from relation
<a name="relations.relation_manager.RelationsManager.remove_parent"></a>
#### remove\_parent
```python
| @staticmethod
| remove_parent(item: Union["NewBaseModel", Type["NewBaseModel"]], parent: "Model", name: str) -> None
```
Removes given parent from relation with given name.
Since you can have many relations between two models you need to pass a name
of relation from which you want to remove the parent.
**Arguments**:
- `item (Union[Model, Type[Model]])`: model with parent registered
- `parent (Model)`: parent Model
- `name (str)`: name of the relation

View File

@ -0,0 +1,144 @@
<a name="relations.relation_proxy"></a>
# relations.relation\_proxy
<a name="relations.relation_proxy.RelationProxy"></a>
## RelationProxy Objects
```python
class RelationProxy(list)
```
Proxy of the Relation that is a list with special methods.
<a name="relations.relation_proxy.RelationProxy.related_field_name"></a>
#### related\_field\_name
```python
| @property
| related_field_name() -> str
```
On first access calculates the name of the related field, later stored in
_related_field_name property.
**Returns**:
`(str)`: name of the related field
<a name="relations.relation_proxy.RelationProxy.__getattribute__"></a>
#### \_\_getattribute\_\_
```python
| __getattribute__(item: str) -> Any
```
Since some QuerySetProxy methods overwrite builtin list methods we
catch calls to them and delegate them to QuerySetProxy instead.
**Arguments**:
- `item (str)`: name of attribute
**Returns**:
`(Any)`: value of attribute
<a name="relations.relation_proxy.RelationProxy.__getattr__"></a>
#### \_\_getattr\_\_
```python
| __getattr__(item: str) -> Any
```
Delegates calls for non existing attributes to QuerySetProxy.
**Arguments**:
- `item (str)`: name of attribute/method
**Returns**:
`(method)`: method from QuerySetProxy if exists
<a name="relations.relation_proxy.RelationProxy._initialize_queryset"></a>
#### \_initialize\_queryset
```python
| _initialize_queryset() -> None
```
Initializes the QuerySetProxy if not yet initialized.
<a name="relations.relation_proxy.RelationProxy._check_if_queryset_is_initialized"></a>
#### \_check\_if\_queryset\_is\_initialized
```python
| _check_if_queryset_is_initialized() -> bool
```
Checks if the QuerySetProxy is already set and ready.
**Returns**:
`(bool)`: result of the check
<a name="relations.relation_proxy.RelationProxy._check_if_model_saved"></a>
#### \_check\_if\_model\_saved
```python
| _check_if_model_saved() -> None
```
Verifies if the parent model of the relation has been already saved.
Otherwise QuerySetProxy cannot filter by parent primary key.
<a name="relations.relation_proxy.RelationProxy._set_queryset"></a>
#### \_set\_queryset
```python
| _set_queryset() -> "QuerySet"
```
Creates a new QuerySet with the relation model and pre-filters it with the current
parent model's primary key, so all queries by definition are already related
to the parent model only, without the need for the user to filter them.
**Returns**:
`(QuerySet)`: initialized QuerySet
<a name="relations.relation_proxy.RelationProxy.remove"></a>
#### remove
```python
| async remove(item: "Model", keep_reversed: bool = True) -> None
```
Removes the item from relation with parent.
Through models are automatically deleted for m2m relations.
For reverse FK relations keep_reversed flag marks if the reversed models
should be kept or deleted from the database too (False means that models
will be deleted, and not only removed from relation).
**Arguments**:
- `item (Model)`: child to remove from relation
- `keep_reversed (bool)`: flag if the reversed model should be kept or deleted too
<a name="relations.relation_proxy.RelationProxy.add"></a>
#### add
```python
| async add(item: "Model") -> None
```
Adds child model to relation.
For ManyToMany relations through instance is automatically created.
**Arguments**:
- `item (Model)`: child to add to relation
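A usage sketch of the two methods above, assuming `Album`/`Track` models with a reverse FK `tracks` relation:
```python
album = await Album.objects.get(name="Malibu")
track = await Track.objects.get(name="The Bird")
# attach the track to the album relation
await album.tracks.add(track)
# detach it again; keep_reversed=False would also delete the track row
await album.tracks.remove(track, keep_reversed=True)
```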

View File

@ -0,0 +1,112 @@
<a name="relations.relation"></a>
# relations.relation
<a name="relations.relation.RelationType"></a>
## RelationType Objects
```python
class RelationType(Enum)
```
Different types of relations supported by ormar:
* ForeignKey = PRIMARY
* reverse ForeignKey = REVERSE
* ManyToMany = MULTIPLE
<a name="relations.relation.Relation"></a>
## Relation Objects
```python
class Relation()
```
Keeps related Models and handles adding/removing of the children.
<a name="relations.relation.Relation.__init__"></a>
#### \_\_init\_\_
```python
| __init__(manager: "RelationsManager", type_: RelationType, field_name: str, to: Type["T"], through: Type["T"] = None) -> None
```
Initialize the Relation and keep the related models either as instances of
passed Model, or as a RelationProxy which is basically a list of models with
some special behavior, as it exposes QuerySetProxy and allows querying the
related models already pre filtered by parent model.
**Arguments**:
- `manager (RelationsManager)`: reference to relation manager
- `type_ (RelationType)`: type of the relation
- `field_name (str)`: name of the relation field
- `to (Type[Model])`: model to which relation leads to
- `through (Type[Model])`: model through which relation goes for m2m relations
<a name="relations.relation.Relation._clean_related"></a>
#### \_clean\_related
```python
| _clean_related() -> None
```
Removes dead weakrefs from RelationProxy.
<a name="relations.relation.Relation._find_existing"></a>
#### \_find\_existing
```python
| _find_existing(child: Union["NewBaseModel", Type["NewBaseModel"]]) -> Optional[int]
```
Find child model in RelationProxy if exists.
**Arguments**:
- `child (Model)`: child model to find
**Returns**:
`(Optional[int])`: index of child in RelationProxy
<a name="relations.relation.Relation.add"></a>
#### add
```python
| add(child: "T") -> None
```
Adds child Model to relation, either sets child as related model or adds
it to the list in RelationProxy depending on relation type.
**Arguments**:
- `child (Model)`: model to add to relation
<a name="relations.relation.Relation.remove"></a>
#### remove
```python
| remove(child: Union["NewBaseModel", Type["NewBaseModel"]]) -> None
```
Removes child Model from relation, either sets None as related model or removes
it from the list in RelationProxy depending on relation type.
**Arguments**:
- `child (Model)`: model to remove from relation
<a name="relations.relation.Relation.get"></a>
#### get
```python
| get() -> Optional[Union[List["T"], "T"]]
```
Return the related model or models from RelationProxy.
**Returns**:
`(Optional[Union[List[Model], Model]])`: related model/models if set

View File

@ -0,0 +1,26 @@
<a name="relations.utils"></a>
# relations.utils
<a name="relations.utils.get_relations_sides_and_names"></a>
#### get\_relations\_sides\_and\_names
```python
get_relations_sides_and_names(to_field: Type[BaseField], parent: "Model", child: "Model", child_name: str, virtual: bool, relation_name: str) -> Tuple["Model", "Model", str, str]
```
Determines the names of the child and parent relations, and
changes one of the sides of the relation into a weakref.proxy to the model.
**Arguments**:
- `to_field (BaseField)`: field with relation definition
- `parent (Model)`: parent model
- `child (Model)`: child model
- `child_name (str)`: name of the child
- `virtual (bool)`: flag if relation is virtual
- `relation_name ()`:
**Returns**:
`(Tuple["Model", "Model", str, str])`: parent, child, child_name, to_name

View File

@ -0,0 +1,130 @@
<a name="decorators.signals"></a>
# decorators.signals
<a name="decorators.signals.receiver"></a>
#### receiver
```python
receiver(signal: str, senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for given signal name.
**Arguments**:
- `signal (str)`: name of the signal to register to
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched
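A minimal registration sketch; the `Album` model is an assumption and the import path simply mirrors the module documented here:
```python
from ormar.decorators.signals import post_save, receiver

@receiver("post_save", senders=Album)  # Album is an assumed ormar.Model
async def notify_on_save(**kwargs) -> None:
    # receivers have to accept **kwargs
    print("post_save received:", kwargs)

# the dedicated decorators documented below are shortcuts bound to a single signal, e.g.:
@post_save(Album)
async def audit_save(**kwargs) -> None:
    print("album saved:", kwargs)
```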
<a name="decorators.signals.post_save"></a>
#### post\_save
```python
post_save(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for post_save signal.
**Arguments**:
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched
<a name="decorators.signals.post_update"></a>
#### post\_update
```python
post_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for post_update signal.
**Arguments**:
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched
<a name="decorators.signals.post_delete"></a>
#### post\_delete
```python
post_delete(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for post_delete signal.
**Arguments**:
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched
<a name="decorators.signals.pre_save"></a>
#### pre\_save
```python
pre_save(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for pre_save signal.
**Arguments**:
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched
<a name="decorators.signals.pre_update"></a>
#### pre\_update
```python
pre_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for pre_update signal.
**Arguments**:
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched
<a name="decorators.signals.pre_delete"></a>
#### pre\_delete
```python
pre_delete(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable
```
Connect given function to all senders for pre_delete signal.
**Arguments**:
- `senders (Union[Type["Model"], List[Type["Model"]]])`: one or a list of "Model" classes
that should have the signal receiver registered
**Returns**:
`(Callable)`: returns the original function untouched

106
docs/api/signals/signal.md Normal file
View File

@ -0,0 +1,106 @@
<a name="signals.signal"></a>
# signals.signal
<a name="signals.signal.callable_accepts_kwargs"></a>
#### callable\_accepts\_kwargs
```python
callable_accepts_kwargs(func: Callable) -> bool
```
Checks if function accepts **kwargs.
**Arguments**:
- `func (function)`: function which signature needs to be checked
**Returns**:
`(bool)`: result of the check
<a name="signals.signal.make_id"></a>
#### make\_id
```python
make_id(target: Any) -> Union[int, Tuple[int, int]]
```
Creates an id of a function or method to be used as a key to store the signal receiver.
**Arguments**:
- `target (Any)`: target which id we want
**Returns**:
`(int)`: id of the target
<a name="signals.signal.Signal"></a>
## Signal Objects
```python
class Signal()
```
Signal that notifies all receiver functions.
In ormar used by models to send pre_save, post_save etc. signals.
<a name="signals.signal.Signal.connect"></a>
#### connect
```python
| connect(receiver: Callable) -> None
```
Connects given receiver function to the signal.
**Raises**:
- `SignalDefinitionError`: if receiver is not callable
or does not accept **kwargs
**Arguments**:
- `receiver (Callable)`: receiver function
<a name="signals.signal.Signal.disconnect"></a>
#### disconnect
```python
| disconnect(receiver: Callable) -> bool
```
Removes the receiver function from the signal.
**Arguments**:
- `receiver (Callable)`: receiver function
**Returns**:
`(bool)`: flag if receiver was removed
<a name="signals.signal.Signal.send"></a>
#### send
```python
| async send(sender: Type["Model"], **kwargs: Any) -> None
```
Notifies all receiver functions with given kwargs
**Arguments**:
- `sender (Type["Model"])`: model that sends the signal
- `kwargs (Any)`: arguments passed to receivers
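A standalone sketch of the connect/send/disconnect contract; in normal use ormar creates and sends these signals for you, and the sender is a Model class rather than `None`:
```python
import asyncio
from ormar.signals.signal import Signal

async def log_event(**kwargs) -> None:
    # receivers have to accept **kwargs
    print("received:", kwargs)

sig = Signal()
sig.connect(log_event)
# send() awaits every connected receiver with the given kwargs
asyncio.run(sig.send(sender=None, action="demo"))
# disconnect returns a flag telling whether the receiver was found and removed
assert sig.disconnect(log_event) is True
```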
<a name="signals.signal.SignalEmitter"></a>
## SignalEmitter Objects
```python
class SignalEmitter()
```
Emitter that registers the signals in an internal dictionary.
If a signal with the given name does not exist it's auto-added on access.
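In practice the emitter is exposed on each model (under `Model.Meta.signals` in this version), so a receiver can also be connected without the decorators; the `Album` model here is an assumption:
```python
async def audit(**kwargs) -> None:
    print("album event:", kwargs)

# accessing a signal name on the emitter auto-creates it if missing
Album.Meta.signals.post_save.connect(audit)
Album.Meta.signals.post_save.disconnect(audit)
```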

View File

@ -60,3 +60,9 @@ mkdocs build
# ... commit, push, and create your pull request # ... commit, push, and create your pull request
``` ```
!!!tip
For more information on how and why ormar works the way it does,
please see the [API documentation][API documentation]
[API documentation]: ./api/index.md

View File

@ -9,20 +9,46 @@ Here you can find a very simple sample application code.
!!!warning !!!warning
This example assumes that you already have a database created. If that is not the case please visit [database initialization][database initialization] section. This example assumes that you already have a database created. If that is not the case please visit [database initialization][database initialization] section.
!!!tip
The following example (all sections) should be put in one file.
It's divided into subsections for clarity.
## Imports and initialization ## Imports and initialization
First take care of the imports and initialization First take care of the imports and initialization
```python hl_lines="1-12" ```python
--8<-- "../docs_src/fastapi/docs001.py" from typing import List, Optional
import databases
import sqlalchemy
from fastapi import FastAPI
import ormar
app = FastAPI()
metadata = sqlalchemy.MetaData()
database = databases.Database("sqlite:///test.db")
app.state.database = database
``` ```
## Database connection ## Database connection
Next define startup and shutdown events (or use middleware) Next define startup and shutdown events (or use middleware)
- note that this is `databases` specific setting not the ormar one - note that this is `databases` specific setting not the ormar one
```python hl_lines="15-26" ```python
--8<-- "../docs_src/fastapi/docs001.py" @app.on_event("startup")
async def startup() -> None:
database_ = app.state.database
if not database_.is_connected:
await database_.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
database_ = app.state.database
if database_.is_connected:
await database_.disconnect()
``` ```
!!!info !!!info
@ -33,8 +59,27 @@ Next define startup and shutdown events (or use middleware)
Define ormar models with appropriate fields. Define ormar models with appropriate fields.
Those models will be used instead of pydantic ones. Those models will be used instead of pydantic ones.
```python hl_lines="29-47"
--8<-- "../docs_src/fastapi/docs001.py" ```python
class Category(ormar.Model):
class Meta:
tablename = "categories"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
class Item(ormar.Model):
class Meta:
tablename = "items"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
category: Optional[Category] = ormar.ForeignKey(Category, nullable=True)
``` ```
!!!tip !!!tip
@ -45,8 +90,38 @@ Those models will be used insted of pydantic ones.
Define your desired endpoints, note how `ormar` models are used both Define your desired endpoints, note how `ormar` models are used both
as `response_model` and as request parameters. as `response_model` and as request parameters.
```python hl_lines="50-79" ```python
--8<-- "../docs_src/fastapi/docs001.py" @app.get("/items/", response_model=List[Item])
async def get_items():
items = await Item.objects.select_related("category").all()
return items
@app.post("/items/", response_model=Item)
async def create_item(item: Item):
await item.save()
return item
@app.post("/categories/", response_model=Category)
async def create_category(category: Category):
await category.save()
return category
@app.put("/items/{item_id}")
async def get_item(item_id: int, item: Item):
item_db = await Item.objects.get(pk=item_id)
return await item_db.update(**item.dict())
@app.delete("/items/{item_id}")
async def delete_item(item_id: int, item: Item = None):
if item:
return {"deleted_rows": await item.delete()}
item_db = await Item.objects.get(pk=item_id)
return {"deleted_rows": await item_db.delete()}
``` ```
!!!note !!!note
@ -133,6 +208,6 @@ def test_all_endpoints():
You can read more on testing fastapi in [fastapi][fastapi] docs. You can read more on testing fastapi in [fastapi][fastapi] docs.
[fastapi]: https://fastapi.tiangolo.com/ [fastapi]: https://fastapi.tiangolo.com/
[models]: ./models.md [models]: ./models/index.md
[database initialization]: ../models/#database-initialization-migrations [database initialization]: ./models/migrations.md
[tests]: https://github.com/collerek/ormar/tree/master/tests [tests]: https://github.com/collerek/ormar/tree/master/tests

View File

@ -0,0 +1,126 @@
# Common Parameters
All `Field` types have a set of common parameters.
## primary_key
`primary_key`: `bool` = `False` -> by default False.
Sets the primary key column on a table, foreign keys always refer to the pk of the `Model`.
Used in sql only.
## autoincrement
`autoincrement`: `bool` = `primary_key and type == int` -> defaults to True if column is a primary key and of type Integer, otherwise False.
Can be only used with int/bigint fields.
If a field has autoincrement it becomes optional.
Used both in sql and pydantic (changes pk field to optional for autoincrement).
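A short sketch (the `metadata`/`database` objects are assumed to be set up as in the other examples): with an autoincrementing integer primary key the `id` field becomes optional when creating instances.
```python
class Course(ormar.Model):
    class Meta:
        tablename = "courses"
        metadata = metadata
        database = database

    # primary_key=True on an Integer implies autoincrement=True
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)

# id can be omitted - the database assigns it on insert
course = await Course.objects.create(name="Databases 101")
assert course.id is not None
```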
## nullable
`nullable`: `bool` = `not primary_key` -> defaults to False for the primary key column, and True for all others.
Specifies if field is optional or required, used both with sql and pydantic.
!!!note
By default all `ForeignKeys` are also nullable, meaning the related `Model` is not required.
If you change the `ForeignKey` column to `nullable=False`, it becomes required.
!!!info
If you want to know more about how you can preload related models during queries and how the relations work read the [queries][queries] and [relations][relations] sections.
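For example (a sketch assuming a `Category` ormar model is already defined), making a `ForeignKey` non-nullable turns the relation into a required field:
```python
class Item(ormar.Model):
    class Meta:
        tablename = "items"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    # nullable=False -> an Item cannot be saved without a Category
    category: Category = ormar.ForeignKey(Category, nullable=False)
```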
## default
`default`: `Any` = `None` -> defaults to None.
A default value used if no other value is passed.
In sql invoked on an insert, used during pydantic model definition.
If the field has a default value it becomes optional.
You can pass a static value or a Callable (function etc.)
Used both in sql and pydantic.
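A sketch of static and callable defaults (model boilerplate assumed as above, field names illustrative):
```python
import datetime

class Post(ormar.Model):
    class Meta:
        tablename = "posts"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=200)
    # static default value
    status: str = ormar.String(max_length=20, default="draft")
    # callable default - evaluated when a new value is needed
    created: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)

# both fields are optional now and filled from the defaults
post = await Post.objects.create(title="Hello")
```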
## server default
`server_default`: `Any` = `None` -> defaults to None.
A default value used if no other value is passed.
In sql invoked on the server side so you can pass e.g. a sql function (like now() or a query/value wrapped in a sqlalchemy text() clause).
If the field has a server_default value it becomes optional.
You can pass a static value or a Callable (function etc.)
Used in sql only.
Sample usage:
```Python hl_lines="21-23"
--8<-- "../docs_src/fields/docs004.py"
```
!!!warning
`server_default` accepts `str`, `sqlalchemy.sql.elements.ClauseElement` or `sqlalchemy.sql.elements.TextClause`
so if you want to set i.e. Integer value you need to wrap it in `sqlalchemy.text()` function like above
!!!tip
You can pass also valid sql (dialect specific) wrapped in `sqlalchemy.text()`
For example `func.now()` above could be exchanged for `text('(CURRENT_TIMESTAMP)')` for sqlite backend
!!!info
`server_default` is passed straight to sqlalchemy table definition so you can read more in [server default][server default] sqlalchemy documentation
## index
`index`: `bool` = `False` -> by default False,
Sets the index on a table's column.
Used in sql only.
## unique
`unique`: `bool` = `False`
Sets the unique constraint on a table's column.
Used in sql only.
## pydantic_only
`pydantic_only`: `bool` = `False`
Prevents creation of a sql column for given field.
Used for data related to given model but not to be stored in the database.
Used in pydantic only.
## choices
`choices`: `Sequence` = `[]`
A set of choices allowed to be used for given field.
Used for data validation on pydantic side.
Prevents insertion of value not present in the choices list.
Used in pydantic only.
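A sketch of `choices` validation (model and values are illustrative):
```python
class Ticket(ormar.Model):
    class Meta:
        tablename = "tickets"
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    # only the listed values pass validation
    severity: str = ormar.String(max_length=10, choices=["low", "medium", "high"])

await Ticket.objects.create(severity="low")       # ok
await Ticket.objects.create(severity="critical")  # fails validation
```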
[relations]: ../relations/index.md
[queries]: ../queries.md
[pydantic]: https://pydantic-docs.helpmanual.io/usage/types/#constrained-types
[server default]: https://docs.sqlalchemy.org/en/13/core/defaults.html#server-invoked-ddl-explicit-default-expressions

View File

@ -10,128 +10,6 @@ There are 12 basic model field types and a special `ForeignKey` and `Many2Many`
Each of the `Fields` has assigned both `sqlalchemy` column class and python type that is used to create `pydantic` model. Each of the `Fields` has assigned both `sqlalchemy` column class and python type that is used to create `pydantic` model.
## Common Parameters
All `Field` types have a set of common parameters.
### primary_key
`primary_key`: `bool` = `False` -> by default False.
Sets the primary key column on a table, foreign keys always refer to the pk of the `Model`.
Used in sql only.
### autoincrement
`autoincrement`: `bool` = `primary_key and type == int` -> defaults to True if column is a primary key and of type Integer, otherwise False.
Can be only used with int/bigint fields.
If a field has autoincrement it becomes optional.
Used both in sql and pydantic (changes pk field to optional for autoincrement).
### nullable
`nullable`: `bool` = `not primary_key` -> defaults to False for primary key column, and True for all other.
Specifies if field is optional or required, used both with sql and pydantic.
!!!note
By default all `ForeignKeys` are also nullable, meaning the related `Model` is not required.
If you change the `ForeignKey` column to `nullable=False`, it becomes required.
!!!info
If you want to know more about how you can preload related models during queries and how the relations work read the [queries][queries] and [relations][relations] sections.
### default
`default`: `Any` = `None` -> defaults to None.
A default value used if no other value is passed.
In sql invoked on an insert, used during pydantic model definition.
If the field has a default value it becomes optional.
You can pass a static value or a Callable (function etc.)
Used both in sql and pydantic.
### server default
`server_default`: `Any` = `None` -> defaults to None.
A default value used if no other value is passed.
In sql invoked on the server side so you can pass i.e. sql function (like now() or query/value wrapped in sqlalchemy text() clause).
If the field has a server_default value it becomes optional.
You can pass a static value or a Callable (function etc.)
Used in sql only.
Sample usage:
```Python hl_lines="21-23"
--8<-- "../docs_src/fields/docs004.py"
```
!!!warning
`server_default` accepts `str`, `sqlalchemy.sql.elements.ClauseElement` or `sqlalchemy.sql.elements.TextClause`
so if you want to set i.e. Integer value you need to wrap it in `sqlalchemy.text()` function like above
!!!tip
You can pass also valid sql (dialect specific) wrapped in `sqlalchemy.text()`
For example `func.now()` above could be exchanged for `text('(CURRENT_TIMESTAMP)')` for sqlite backend
!!!info
`server_default` is passed straight to sqlalchemy table definition so you can read more in [server default][server default] sqlalchemy documentation
### index
`index`: `bool` = `False` -> by default False,
Sets the index on a table's column.
Used in sql only.
### unique
`unique`: `bool` = `False`
Sets the unique constraint on a table's column.
Used in sql only.
### pydantic_only
`pydantic_only`: `bool` = `False`
Prevents creation of a sql column for given field.
Used for data related to given model but not to be stored in the database.
Used in pydantic only.
### choices
`choices`: `Sequence` = `[]`
A set of choices allowed to be used for given field.
Used for data validation on pydantic side.
Prevents insertion of value not present in the choices list.
Used in pydantic only.
## Fields Types ## Fields Types
### String ### String
@ -261,12 +139,13 @@ You can use either `length` and `precision` parameters or `max_digits` and `deci
Depending on the format either 32 or 36 char is used in the database. Depending on the format either 32 or 36 char is used in the database.
Sample: Sample:
* 'hex' format value = "c616ab438cce49dbbf4380d109251dce" (CHAR(32))
* 'string' value = "c616ab43-8cce-49db-bf43-80d109251dce" (CHAR(36)) * 'hex' format value = `c616ab438cce49dbbf4380d109251dce` (CHAR(32))
* 'string' value = `c616ab43-8cce-49db-bf43-80d109251dce` (CHAR(36))
When loaded it's always python UUID so you can compare it and compare two formats values between each other. When loaded it's always python UUID so you can compare it and compare two formats values between each other.
[relations]: ./relations.md [relations]: ../relations/index.md
[queries]: ./queries.md [queries]: ../queries.md
[pydantic]: https://pydantic-docs.helpmanual.io/usage/types/#constrained-types [pydantic]: https://pydantic-docs.helpmanual.io/usage/types/#constrained-types
[server default]: https://docs.sqlalchemy.org/en/13/core/defaults.html#server-invoked-ddl-explicit-default-expressions [server default]: https://docs.sqlalchemy.org/en/13/core/defaults.html#server-invoked-ddl-explicit-default-expressions

View File

@ -130,11 +130,11 @@ album = await Album.objects.select_related("tracks").all()
assert len(album.tracks) == 3 assert len(album.tracks) == 3
# Fetch instances, with a filter across an FK relationship. # Fetch instances, with a filter across an FK relationship.
tracks = Track.objects.filter(album__name="Fantasies") tracks = await Track.objects.filter(album__name="Fantasies").all()
assert len(tracks) == 2 assert len(tracks) == 2
# Fetch instances, with a filter and operator across an FK relationship. # Fetch instances, with a filter and operator across an FK relationship.
tracks = Track.objects.filter(album__name__iexact="fantasies") tracks = await Track.objects.filter(album__name__iexact="fantasies").all()
assert len(tracks) == 2 assert len(tracks) == 2
# Limit a query # Limit a query
@ -149,6 +149,7 @@ assert len(tracks) == 1
* `create(**kwargs): -> Model` * `create(**kwargs): -> Model`
* `get(**kwargs): -> Model` * `get(**kwargs): -> Model`
* `get_or_create(**kwargs) -> Model` * `get_or_create(**kwargs) -> Model`
* `first(): -> Model`
* `update(each: bool = False, **kwargs) -> int` * `update(each: bool = False, **kwargs) -> int`
* `update_or_create(**kwargs) -> Model` * `update_or_create(**kwargs) -> Model`
* `bulk_create(objects: List[Model]) -> None` * `bulk_create(objects: List[Model]) -> None`

View File

@ -295,175 +295,6 @@ Note that type hints are **optional** so perfectly valid `ormar` code can look l
`ormar` constructs annotations used by `pydantic` from its own fields. `ormar` constructs annotations used by `pydantic` from its own fields.
### Database initialization/ migrations
Note that all examples assume that you already have a database.
If that is not the case and you need to create your tables, that's super easy as `ormar` is using sqlalchemy for underlying table construction.
All you have to do is call `create_all()` like in the example below.
```python
import sqlalchemy
# get your database url in sqlalchemy format - same as used with databases instance used in Model definition
engine = sqlalchemy.create_engine("sqlite:///test.db")
# note that this has to be the same metadata that is used in ormar Models definition
metadata.create_all(engine)
```
You can also create single tables, sqlalchemy tables are exposed in `ormar.Meta` class.
```python
import sqlalchemy
# get your database url in sqlalchemy format - same as used with databases instance used in Model definition
engine = sqlalchemy.create_engine("sqlite:///test.db")
# Artist is an ormar model from previous examples
Artist.Meta.table.create(engine)
```
!!!warning
You need to create the tables only once, so use a python console for that or remove the script from your production code after first use.
Likewise as with tables, since we base tables on sqlalchemy for migrations please use [alembic][alembic].
Use command line to reproduce this minimalistic example.
```python
alembic init alembic
alembic revision --autogenerate -m "made some changes"
alembic upgrade head
```
A quick example of alembic migrations should be something similar to:
When you have application structure like:
```
-> app
-> alembic (initialized folder - so run alembic init alembic inside app folder)
-> models (here are the models)
-> __init__.py
-> my_models.py
```
Your `env.py` file (in alembic folder) can look something like:
```python
from logging.config import fileConfig
from sqlalchemy import create_engine
from alembic import context
import sys, os
# add app folder to system path (alternative is running it from parent folder with python -m ...)
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../../')
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here (the one used in ormar)
# for 'autogenerate' support
from app.models.my_models import metadata
target_metadata = metadata
# set your url here or import from settings
# note that by default url is in saved sqlachemy.url variable in alembic.ini file
URL = "sqlite:///test.db"
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=URL,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
# if you use UUID field set also this param
# the prefix has to match sqlalchemy import name in alembic
# that can be set by sqlalchemy_module_prefix option (default 'sa.')
user_module_prefix='sa.'
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = create_engine(URL)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
# if you use UUID field set also this param
# the prefix has to match sqlalchemy import name in alembic
# that can be set by sqlalchemy_module_prefix option (default 'sa.')
user_module_prefix='sa.'
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
```
You can also include/exclude specific tables with `include_object` parameter passed to `context.configure`. That should be a function returning `True/False` for given objects.
A sample function excluding tables starting with `data_` in name unless it's 'data_jobs':
```python
def include_object(object, name, type_, reflected, compare_to):
if name and name.startswith('data_') and name not in ['data_jobs']:
return False
return True
```
!!!note
Function parameters for `include_object` (you can change the name) are required and defined in alembic;
to check what they do see the [alembic][alembic] documentation.
And you pass it into context like (both in online and offline):
```python
context.configure(
url=URL,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
user_module_prefix='sa.',
include_object=include_object
)
```
!!!info
You can read more about table creation, altering and migrations in [sqlalchemy table creation][sqlalchemy table creation] documentation.
### Dependencies ### Dependencies
@ -590,175 +421,9 @@ The objects itself have a saved status, which is set as following:
You can check if model is saved with `ModelInstance.saved` property You can check if model is saved with `ModelInstance.saved` property
## `Model` methods [fields]: ../fields/field-types.md
[relations]: ../relations/index.md
### load [queries]: ../queries.md
By default when you query a table without prefetching related models, ormar will still construct
your related models, but populate them only with the pk value. You can load the related model by calling the `load()` method.
`load()` can also be used to refresh the model from the database (if it was changed by some other process).
```python
track = await Track.objects.get(name='The Bird')
track.album.pk # will return malibu album pk (1)
track.album.name # will return None
# you need to actually load the data first
await track.album.load()
track.album.name # will return 'Malibu'
```
### save
`save() -> self`
You can create new models by using `QuerySet.create()` method or by initializing your model as a normal pydantic model
and later calling `save()` method.
`save()` can also be used to persist changes that you made to the model, but only if the primary key is not set or the model does not exist in database.
The `save()` method does not check if the model exists in db, so if it does you will get an integrity error from your selected db backend if trying to save a model with an already existing primary key.
```python
track = Track(name='The Bird')
await track.save() # will persist the model in database
track = await Track.objects.get(name='The Bird')
await track.save() # will raise integrity error as pk is populated
```
### update
`update(**kwargs) -> self`
You can update models by using `QuerySet.update()` method or by updating your model attributes (fields) and calling `update()` method.
If you try to update a model without a primary key set a `ModelPersistenceError` exception will be thrown.
To persist a newly created model use `save()` or `upsert(**kwargs)` methods.
```python
track = await Track.objects.get(name='The Bird')
await track.update(name='The Bird Strikes Again')
```
### upsert
`upsert(**kwargs) -> self`
It's a proxy to either the `save()` or `update(**kwargs)` methods described above.
If the primary key is set -> the `update` method will be called.
If the pk is not set the `save()` method will be called.
```python
track = Track(name='The Bird')
await track.upsert() # will call save as the pk is empty
track = await Track.objects.get(name='The Bird')
await track.upsert(name='The Bird Strikes Again') # will call update as pk is already populated
```
### delete
You can delete models by using `QuerySet.delete()` method or by using your model and calling `delete()` method.
```python
track = await Track.objects.get(name='The Bird')
await track.delete() # will delete the model from database
```
!!!tip
Note that the `track` object stays the same, only the record in the database is removed.
### save_related
`save_related(follow: bool = False) -> None`
Method goes through all relations of the `Model` on which the method is called,
and calls `upsert()` method on each model that is **not** saved.
To understand when a model is saved check [save status][save status] section above.
By default the `save_related` method saves only models that are directly related (one step away) to the model on which the method is called.
But you can specify the `follow=True` parameter to traverse through nested models and save all of them in the relation tree.
!!!warning
To avoid circular updates with `follow=True` set, `save_related` keeps a set of already visited Models,
and won't perform nested `save_related` on Models that were already visited.
So if you have a diamond or circular relations types you need to perform the updates in a manual way.
```python
# in an example like this the second Street (coming from City) won't be saved by save_related, so ZipCode won't be updated
Street -> District -> City -> Street -> ZipCode
```
## Internals
Apart from special parameters defined in the `Model` during definition (tablename, metadata etc.) the `Model` provides you with useful internals.
### Pydantic Model
All `Model` classes inherit from `pydantic.BaseModel` so you can access all normal attributes of pydantic models.
For example to list pydantic model fields you can:
```Python hl_lines="20"
--8<-- "../docs_src/models/docs003.py"
```
!!!tip
Note how the primary key `id` field is optional as `Integer` primary key by default has `autoincrement` set to `True`.
!!!info
For more options visit official [pydantic][pydantic] documentation.
### Sqlalchemy Table
To access auto created sqlalchemy table you can use `Model.Meta.table` parameter
For example to list table columns you can:
```Python hl_lines="20"
--8<-- "../docs_src/models/docs004.py"
```
!!!tip
You can access table primary key name by `Course.Meta.pkname`
!!!info
For more options visit official [sqlalchemy-metadata][sqlalchemy-metadata] documentation.
### Fields Definition
To access ormar `Fields` you can use `Model.Meta.model_fields` parameter
For example to list table model fields you can:
```Python hl_lines="20"
--8<-- "../docs_src/models/docs005.py"
```
!!!info
Note that fields stored on a model are `classes` not `instances`.
So if you print just model fields you will get:
`{'id': <class 'ormar.fields.model_fields.Integer'>, `
`'name': <class 'ormar.fields.model_fields.String'>, `
`'completed': <class 'ormar.fields.model_fields.Boolean'>}`
[fields]: ./fields.md
[relations]: ./relations.md
[queries]: ./queries.md
[pydantic]: https://pydantic-docs.helpmanual.io/ [pydantic]: https://pydantic-docs.helpmanual.io/
[sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/ [sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/
[sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html [sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html
@ -766,5 +431,5 @@ For example to list table model fields you can:
[sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls [sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
[sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables [sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables
[alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html [alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html
[save status]: ../models/#model-save-status [save status]: ../models/index/#model-save-status
[Internals]: #internals [Internals]: ../models/internals.md

463
docs/models/inheritance.md Normal file
View File

@ -0,0 +1,463 @@
# Inheritance
Out of various types of ORM models inheritance `ormar` currently supports two of them:
* **Mixins**
* **Concrete table inheritance** (with parents set to `abstract=True`)
## Types of inheritance
The short summary of different types of inheritance is:
* **Mixins [SUPPORTED]** - don't subclass `ormar.Model`, just define fields that are
later used on different models (like `created_date` and `updated_date` on each model);
only actual models create tables, but the fields from the mixins are added to them
* **Concrete table inheritance [SUPPORTED]** - means that the parent is marked as abstract
and each child has its own table with columns from the parent plus its own columns; kind
of similar to Mixins, but the parent is also a Model
* **Single table inheritance [NOT SUPPORTED]** - means that only one table is created
with fields that are a combination/sum of the parent and all children models, but each child
model uses only a subset of the columns in the db (all parent and own ones, skipping the other
children's ones)
* **Multi/ Joined table inheritance [NOT SUPPORTED]** - means that part of the columns
is saved on the parent model and part on the child model, connected to each
other by a kind of one-to-one relation, and under the hood you operate on two models at
once
* **Proxy models [NOT SUPPORTED]** - means that only the parent has an actual table;
children just add methods, modify settings etc.
## Mixins
To use Mixins just define a class that is not inheriting from an `ormar.Model` but is
defining `ormar.Fields` as class variables.
```python
# a mixin defines the fields but is a normal python class
class AuditMixin:
created_by: str = ormar.String(max_length=100)
updated_by: str = ormar.String(max_length=100, default="Sam")
class DateFieldsMixins:
created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)
updated_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)
# a model can inherit from one or more mixins
class Category(ormar.Model, DateFieldsMixins, AuditMixin):
class Meta(ormar.ModelMeta):
tablename = "categories"
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50, unique=True, index=True)
code: int = ormar.Integer()
```
!!!tip
Note that Mixins are **not** models, so you still need to inherit
from `ormar.Model` as well as define `Meta` class in the **final** model.
The `Category` class above will have four additional fields: `created_date`, `updated_date`,
`created_by` and `updated_by`.
There will be only one table created for model `Category` (`categories`), with the `Category`
class fields combined with all the `Mixins` fields.
Note that the `Mixin` suffix in the class name is optional but is a good python practice.
## Concrete table inheritance
In concept concrete table inheritance is very similar to Mixins, but uses
actual `ormar.Models` as base classes.
!!!warning
Note that base classes have `abstract=True` set in the `Meta` class; if you try
to inherit from a class that is not marked as abstract, `ModelDefinitionError` will be raised.
Since this abstract Model will never be initialized you can skip `metadata`
and `database` in its `Meta` definition.
But if you provide them - they will be inherited, so you do not have to
provide `metadata` and `database` in the final/concrete class.
Note that you can always overwrite them in the child/concrete class if you need to.
Moreover, at least one of the classes in the inheritance chain has to provide both `database` and `metadata` -
otherwise an error will be raised.
```python
# note that base classes have abstract=True
# since this model will never be initialized you can skip metadata and database
class AuditModel(ormar.Model):
class Meta:
abstract = True
created_by: str = ormar.String(max_length=100)
updated_by: str = ormar.String(max_length=100, default="Sam")
# but if you provide it it will be inherited - DRY (Don't Repeat Yourself) in action
class DateFieldsModel(ormar.Model):
class Meta:
abstract = True
metadata = metadata
database = db
created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)
updated_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)
# that way you do not have to provide metadata and databases in concrete class
class Category(DateFieldsModel, AuditModel):
class Meta(ormar.ModelMeta):
tablename = "categories"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50, unique=True, index=True)
code: int = ormar.Integer()
```
The list of inherited options/settings is as follows: `metadata`, `database`
and `constraints`.
Also methods decorated with `@property_field` decorator will be inherited/recognized.
Of course apart from that all fields from base classes are combined and created in the
concrete table of the final Model.
!!!tip
Note how you don't have to provide `abstract=False` in the final class - it's the default setting
that is not inherited.
## Redefining fields in subclasses
Note that you can redefine previously created fields like in normal python class
inheritance.
Whenever you define a field with the same name and a new definition, it will completely replace
the previously defined one.
```python
# base class
class DateFieldsModel(ormar.Model):
class Meta:
abstract = True
metadata = metadata
database = db
# note that UniqueColumns needs the sqlalchemy db column names, not the ormar ones
constraints = [ormar.UniqueColumns("creation_date", "modification_date")]
created_date: datetime.datetime = ormar.DateTime(
default=datetime.datetime.now, name="creation_date"
)
updated_date: datetime.datetime = ormar.DateTime(
default=datetime.datetime.now, name="modification_date"
)
class RedefinedField(DateFieldsModel):
class Meta(ormar.ModelMeta):
tablename = "redefines"
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
# here the created_date is replaced by the String field
created_date: str = ormar.String(max_length=200, name="creation_date")
# you can verify that the final field is correctly declared and created
changed_field = RedefinedField.Meta.model_fields["created_date"]
assert changed_field.default is None
assert changed_field.alias == "creation_date"
assert any(x.name == "creation_date" for x in RedefinedField.Meta.table.columns)
assert isinstance(
RedefinedField.Meta.table.columns["creation_date"].type,
sqlalchemy.sql.sqltypes.String,
)
```
!!!warning
If you declare a `UniqueColumns` constraint with column names, the final model **has to have**
a column with the same name declared. Otherwise, a `ModelDefinitionError` will be raised.
So in the example above, if you do not provide `name` for `created_date` in the `RedefinedField` model,
ormar will complain.
`created_date: str = ormar.String(max_length=200) # exception`
`created_date: str = ormar.String(max_length=200, name="creation_date2") # exception`
## Relations in inheritance
You can declare relations in every step of inheritance, so both in parent and child
classes.
When you define a relation on the child model level it either overwrites the relation
defined in the parent model (if the same field name is used), or, if you define a new relation,
it is accessible only to this child.
When inheriting relations, you always need to be aware of the `related_name` parameter, which
has to be unique across the related model when you define multiple child classes that inherit the
same relation.
If you do not provide the `related_name` parameter, ormar calculates it for you. This works
with inheritance as all child models have to have different class names, which are used
to calculate the default `related_name` (class.name.lower()+'s').
But if you provide a `related_name`, this name cannot be reused in all child models as
they would overwrite each other on the related model side.
Therefore, you have two options:
* redefine the relation field in child models and manually provide different `related_name`
parameters
* leave this for `ormar` to handle -> the auto-adjusted related_names are: the original
related_name + "_" + the child model **table** name
That might sound complicated but let's look at the following example:
### ForeignKey relations
```python
# normal model used in relation
class Person(ormar.Model):
class Meta:
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
# parent model - needs to be abstract
class Car(ormar.Model):
class Meta:
abstract = True
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50)
owner: Person = ormar.ForeignKey(Person)
# note that we refer to the Person model again so we **have to** provide related_name
co_owner: Person = ormar.ForeignKey(Person, related_name="coowned")
created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)
class Truck(Car):
class Meta:
pass
max_capacity: int = ormar.Integer()
class Bus(Car):
class Meta:
# default naming is name.lower()+'s' so it's ugly for buss ;)
tablename = "buses"
max_persons: int = ormar.Integer()
```
Now when you will inspect the fields on Person model you will get:
```python
Person.Meta.model_fields
"""
{'id': <class 'ormar.fields.model_fields.Integer'>,
'name': <class 'ormar.fields.model_fields.String'>,
'trucks': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_trucks': <class 'ormar.fields.foreign_key.ForeignKey'>,
'buss': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_buses': <class 'ormar.fields.foreign_key.ForeignKey'>}
"""
```
Note how you have `trucks` and `buss` fields that lead to the Truck and Bus classes that
this Person owns. There was no `related_name` parameter so the default names were used.
At the same time the co-owned cars need to be referenced by `coowned_trucks`
and `coowned_buses`. Ormar appended the `_trucks` and `_buses` suffixes taken from the child
model table names.
Seems fine: the default name for the owned trucks is ok (`trucks`), but `buss` is
ugly, so how can we change it?
The solution is pretty simple - just redefine the field in Bus class and provide
different `related_name` parameter.
```python
# rest of the above example remains the same
class Bus(Car):
class Meta:
tablename = "buses"
# new field that changes the related_name
owner: Person = ormar.ForeignKey(Person, related_name="buses")
max_persons: int = ormar.Integer()
```
Now the columns look much better.
```python
Person.Meta.model_fields
"""
{'id': <class 'ormar.fields.model_fields.Integer'>,
'name': <class 'ormar.fields.model_fields.String'>,
'trucks': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_trucks': <class 'ormar.fields.foreign_key.ForeignKey'>,
'buses': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_buses': <class 'ormar.fields.foreign_key.ForeignKey'>}
"""
```
!!!note
You could also provide `related_name` for the `owner` field; that way the proper suffixes
would be added.
`owner: Person = ormar.ForeignKey(Person, related_name="owned")`
and model fields for Person owned cars would become `owned_trucks` and `owned_buses`.
### ManyToMany relations
Similarly, you can inherit from Models that have ManyToMany relations declared, but
there is one, albeit substantial, difference - the Through model.
Since in the future the Through model will be able to hold additional fields, and for now it links only two Tables
(the `from` and `to` ones), each child that inherits the m2m relation field has to have a separate
Through model.
Of course, you can overwrite the relation in each Child model, but that requires additional
code and undermines the point of the whole inheritance. `Ormar` will handle this for you if
you agree with the default naming convention, which you can always manually overwrite in
children if needed.
Again, let's look at an example to grasp the concepts more easily.
We will modify the previous example described above to use an m2m relation for co_owners.
```python
# person remains the same as above
class Person(ormar.Model):
class Meta:
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
# new through model between Person and Car2
class PersonsCar(ormar.Model):
class Meta:
tablename = "cars_x_persons"
metadata = metadata
database = db
# note how co_owners is now ManyToMany relation
class Car2(ormar.Model):
class Meta:
# parent class needs to be marked abstract
abstract = True
metadata = metadata
database = db
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=50)
# note the related_name - needs to be unique across Person
# model, regardless of how many different models leads to Person
owner: Person = ormar.ForeignKey(Person, related_name="owned")
co_owners: List[Person] = ormar.ManyToMany(
Person, through=PersonsCar, related_name="coowned"
)
created_date: datetime.datetime = ormar.DateTime(default=datetime.datetime.now)
# child models define only additional Fields
class Truck2(Car2):
class Meta:
# note how you don't have to provide inherited Meta params
tablename = "trucks2"
max_capacity: int = ormar.Integer()
class Bus2(Car2):
class Meta:
tablename = "buses2"
max_persons: int = ormar.Integer()
```
`Ormar` automatically modifies the related_name of the fields to include the **table** name
of the children models. The default name is the original related_name + '_' + the child table name.
That way for class Truck2 the relation defined in
`owner: Person = ormar.ForeignKey(Person, related_name="owned")` becomes `owned_trucks2`.
You can verify the names by inspecting the list of fields present on `Person` model.
```python
Person.Meta.model_fields
{
# note how all relation fields need to be unique on Person
# regardless if autogenerated or manually overwritten
'id': <class 'ormar.fields.model_fields.Integer'>,
'name': <class 'ormar.fields.model_fields.String'>,
# note that we expanded on previous example so all 'old' fields are here
'trucks': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_trucks': <class 'ormar.fields.foreign_key.ForeignKey'>,
'buses': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_buses': <class 'ormar.fields.foreign_key.ForeignKey'>,
# newly defined related fields
'owned_trucks2': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_trucks2': <class 'abc.ManyToMany'>,
'owned_buses2': <class 'ormar.fields.foreign_key.ForeignKey'>,
'coowned_buses2': <class 'abc.ManyToMany'>
}
```
But that's not all. It's kind of internal to `ormar` but affects the data structure in the database,
so let's examine the through models for both `Bus2` and `Truck2` models.
```python
Bus2.Meta.model_fields['co_owners'].through
<class 'abc.PersonsCarBus2'>
Bus2.Meta.model_fields['co_owners'].through.Meta.tablename
'cars_x_persons_buses2'
Truck2.Meta.model_fields['co_owners'].through
<class 'abc.PersonsCarTruck2'>
Truck2.Meta.model_fields['co_owners'].through.Meta.tablename
'cars_x_persons_trucks2'
```
As you can see above, `ormar` cloned the Through model for each of the Child classes and added
the Child **class** name at the end, while for the table names of the cloned models
the name of the **table** from the child is used.
Note that the original model is not only unused - the table for this model is also removed from metadata:
```python
Bus2.Meta.metadata.tables.keys()
dict_keys(['test_date_models', 'categories', 'subjects', 'persons', 'trucks', 'buses',
'cars_x_persons_trucks2', 'trucks2', 'cars_x_persons_buses2', 'buses2'])
```
So be aware that if you introduce inheritance along the way and convert a model into an
abstract parent model, you may lose your data in the through table if you are not careful.
!!!note
Note that the original table name and model name of the Through model are never used.
Only the cloned models' tables are created and used.
!!!warning
Note that each subclass of a Model that has a `ManyToMany` relation defined generates
a new `Through` model, which also means a **new database table**.
That means that each time you define a Child model you need to either manually create
the table in the database, or run a migration (with alembic).

70
docs/models/internals.md Normal file
View File

@ -0,0 +1,70 @@
# Internals
Apart from special parameters defined in the `Model` during definition (tablename, metadata etc.) the `Model` provides you with useful internals.
## Pydantic Model
All `Model` classes inherit from `pydantic.BaseModel` so you can access all normal attributes of pydantic models.
For example to list pydantic model fields you can:
```Python hl_lines="20"
--8<-- "../docs_src/models/docs003.py"
```
!!!tip
Note how the primary key `id` field is optional as `Integer` primary key by default has `autoincrement` set to `True`.
!!!info
For more options visit official [pydantic][pydantic] documentation.
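For illustration, a short inline sketch (assuming a `Course` model like the ones used in the docs snippets; `__fields__` is the standard pydantic attribute holding the field definitions):
```python
# pydantic fields generated for the ormar model
print(Course.__fields__.keys())  # e.g. dict_keys(['id', 'name', 'completed'])
```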
## Sqlalchemy Table
To access the auto created sqlalchemy table you can use the `Model.Meta.table` parameter.
For example to list table columns you can:
```Python hl_lines="20"
--8<-- "../docs_src/models/docs004.py"
```
!!!tip
You can access table primary key name by `Course.Meta.pkname`
!!!info
For more options visit official [sqlalchemy-metadata][sqlalchemy-metadata] documentation.
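For illustration, a short inline sketch (again assuming a `Course` model like the ones used in the docs snippets):
```python
# sqlalchemy table and its columns
print(Course.Meta.table.columns.keys())  # e.g. ['id', 'name', 'completed']
# primary key column name kept by ormar
print(Course.Meta.pkname)                # 'id'
```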
## Fields Definition
To access ormar `Fields` you can use the `Model.Meta.model_fields` parameter.
For example to list table model fields you can:
```Python hl_lines="20"
--8<-- "../docs_src/models/docs005.py"
```
!!!info
Note that fields stored on a model are `classes` not `instances`.
So if you print just model fields you will get:
`{'id': <class 'ormar.fields.model_fields.Integer'>, `
`'name': <class 'ormar.fields.model_fields.String'>, `
`'completed': <class 'ormar.fields.model_fields.Boolean'>}`
[fields]: ./fields.md
[relations]: ./relations/index.md
[queries]: ./queries.md
[pydantic]: https://pydantic-docs.helpmanual.io/
[sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/
[sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html
[databases]: https://github.com/encode/databases
[sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
[sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables
[alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html
[save status]: ../models/#model-save-status
[Internals]: #internals

130
docs/models/methods.md Normal file
View File

@ -0,0 +1,130 @@
# Model methods
!!!tip
Main interaction with the databases is exposed through a `QuerySet` object exposed on
each model as `Model.objects` similar to the django orm.
To read more about **querying, joining tables, excluding fields etc. visit the [queries][queries] section.**
Each model instance has a set of methods to `save`, `update` or `load` itself.
Available methods are described below.
## load
By default, when you query a table without prefetching related models, ormar will still construct
your related models, but populate them only with the pk value. You can load the related model by calling the `load()` method.
`load()` can also be used to refresh the model from the database (if it was changed by some other process).
```python
track = await Track.objects.get(name='The Bird')
track.album.pk # will return malibu album pk (1)
track.album.name # will return None
# you need to actually load the data first
await track.album.load()
track.album.name # will return 'Malibu'
```
## save
`save() -> self`
You can create new models by using `QuerySet.create()` method or by initializing your model as a normal pydantic model
and later calling `save()` method.
`save()` can also be used to persist changes that you made to the model, but only if the primary key is not set or the model does not exist in the database.
The `save()` method does not check if the model exists in the db, so if it does you will get an integrity error from your selected db backend when trying to save a model with an already existing primary key.
```python
track = Track(name='The Bird')
await track.save() # will persist the model in database
track = await Track.objects.get(name='The Bird')
await track.save() # will raise integrity error as pk is populated
```
## update
`update(**kwargs) -> self`
You can update models by using `QuerySet.update()` method or by updating your model attributes (fields) and calling `update()` method.
If you try to update a model without a primary key set, a `ModelPersistenceError` exception will be thrown.
To persist a newly created model use `save()` or `upsert(**kwargs)` methods.
```python
track = await Track.objects.get(name='The Bird')
await track.update(name='The Bird Strikes Again')
```
## upsert
`upsert(**kwargs) -> self`
It's a proxy to either the `save()` or `update(**kwargs)` methods described above.
If the primary key is set -> the `update` method will be called.
If the pk is not set -> the `save()` method will be called.
```python
track = Track(name='The Bird')
await track.upsert() # will call save as the pk is empty
track = await Track.objects.get(name='The Bird')
await track.upsert(name='The Bird Strikes Again') # will call update as pk is already populated
```
## delete
You can delete models by using the `QuerySet.delete()` method or by calling the `delete()` method on a model instance.
```python
track = await Track.objects.get(name='The Bird')
await track.delete() # will delete the model from database
```
!!!tip
Note that the `track` object stays the same, only the record in the database is removed.
## save_related
`save_related(follow: bool = False) -> None`
Method goes through all relations of the `Model` on which the method is called,
and calls `upsert()` method on each model that is **not** saved.
To understand when a model is saved check [save status][save status] section above.
By default the `save_related` method saves only models that are directly related (one step away) to the model on which the method is called.
But you can specify the `follow=True` parameter to traverse through nested models and save all of them in the relation tree.
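A minimal, hypothetical sketch of the difference (the `Publisher`/`Author`/`Book` models are made up for illustration; editing a field marks a model as not saved, as described in the [save status][save status] section):
```python
publisher = await Publisher.objects.create(name="Old Name")
author = await Author.objects.create(name="Alice", publisher=publisher)
book = await Book.objects.create(title="Intro", author=author)

author.name = "Alice B."      # modified -> no longer saved
publisher.name = "New Name"   # modified -> no longer saved

# without follow only the directly related author would be upserted,
# follow=True traverses the relation tree and upserts the publisher as well
await book.save_related(follow=True)
```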
!!!warning
To avoid circular updates with `follow=True` set, `save_related` keeps a set of already visited Models,
and won't perform nested `save_related` on Models that were already visited.
So if you have diamond or circular relation types you need to perform some of the updates manually.
```python
# in an example like this the second Street (coming from City) won't be visited by save_related, so ZipCode won't be updated
Street -> District -> City -> Street -> ZipCode
```
[fields]: ../fields.md
[relations]: ../relations/index.md
[queries]: ../queries.md
[pydantic]: https://pydantic-docs.helpmanual.io/
[sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/
[sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html
[databases]: https://github.com/encode/databases
[sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
[sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables
[alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html
[save status]: ../models/index/#model-save-status
[Internals]: #internals

193
docs/models/migrations.md Normal file
View File

@ -0,0 +1,193 @@
# Migrations
## Database Initialization
Note that all examples assume that you already have a database.
If that is not the case and you need to create your tables, that's super easy as `ormar` is using sqlalchemy for underlying table construction.
All you have to do is call `create_all()` like in the example below.
```python
import sqlalchemy
# get your database url in sqlalchemy format - same as used with databases instance used in Model definition
engine = sqlalchemy.create_engine("sqlite:///test.db")
# note that this has to be the same metadata that is used in ormar Models definition
metadata.create_all(engine)
```
You can also create single tables; the sqlalchemy tables are exposed on each model's `Meta` class.
```python
import sqlalchemy
# get your database url in sqlalchemy format - same as used with databases instance used in Model definition
engine = sqlalchemy.create_engine("sqlite:///test.db")
# Artist is an ormar model from previous examples
Artist.Meta.table.create(engine)
```
!!!warning
You need to create the tables only once, so use a python console for that or remove the script from your production code after first use.
## Alembic usage
Likewise, since the tables are based on sqlalchemy, please use [alembic][alembic] for migrations.
### Initialization
Use the command line to reproduce this minimalistic example.
```bash
alembic init alembic
alembic revision --autogenerate -m "made some changes"
alembic upgrade head
```
### Sample env.py file
A quick example of an alembic migrations setup for ormar is shown below.
When you have an application structure like:
```
-> app
-> alembic (initialized folder - so run alembic init alembic inside app folder)
-> models (here are the models)
-> __init__.py
-> my_models.py
```
Your `env.py` file (in alembic folder) can look something like:
```python
from logging.config import fileConfig
from sqlalchemy import create_engine
from alembic import context
import sys, os
# add app folder to system path (alternative is running it from parent folder with python -m ...)
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../../')
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here (the one used in ormar)
# for 'autogenerate' support
from app.models.my_models import metadata
target_metadata = metadata
# set your url here or import from settings
# note that by default the url is saved in the sqlalchemy.url variable in the alembic.ini file
URL = "sqlite:///test.db"
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=URL,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
# if you use UUID field set also this param
# the prefix has to match sqlalchemy import name in alembic
# that can be set by sqlalchemy_module_prefix option (default 'sa.')
user_module_prefix='sa.'
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = create_engine(URL)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
# if you use UUID field set also this param
# the prefix has to match sqlalchemy import name in alembic
# that can be set by sqlalchemy_module_prefix option (default 'sa.')
user_module_prefix='sa.'
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
```
### Excluding tables
You can also include/exclude specific tables with `include_object` parameter passed to `context.configure`. That should be a function returning `True/False` for given objects.
A sample function excluding all tables whose names start with `data_` unless the name is `data_jobs`:
```python
def include_object(object, name, type_, reflected, compare_to):
if name and name.startswith('data_') and name not in ['data_jobs']:
return False
return True
```
!!!note
The function parameters for `include_object` (you can change the function name) are required and defined by alembic.
To check what they do see the [alembic][alembic] documentation.
And you pass it into the context like this (in both online and offline mode):
```python
context.configure(
url=URL,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
user_module_prefix='sa.',
include_object=include_object
)
```
!!!info
You can read more about table creation, altering and migrations in [sqlalchemy table creation][sqlalchemy table creation] documentation.
[fields]: ./fields.md
[relations]: ./relations/index.md
[queries]: ./queries.md
[pydantic]: https://pydantic-docs.helpmanual.io/
[sqlalchemy-core]: https://docs.sqlalchemy.org/en/latest/core/
[sqlalchemy-metadata]: https://docs.sqlalchemy.org/en/13/core/metadata.html
[databases]: https://github.com/encode/databases
[sqlalchemy connection string]: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
[sqlalchemy table creation]: https://docs.sqlalchemy.org/en/13/core/metadata.html#creating-and-dropping-database-tables
[alembic]: https://alembic.sqlalchemy.org/en/latest/tutorial.html
[save status]: ../models/index/#model-save-status
[Internals]: #internals

View File

@ -47,7 +47,7 @@ await malibu.save()
Get's the first row from the db meeting the criteria set by kwargs. Get's the first row from the db meeting the criteria set by kwargs.
If no criteria set it will return the first row in db. If no criteria set it will return the last row in db sorted by pk.
Passing a criteria is actually calling filter(**kwargs) method described below. Passing a criteria is actually calling filter(**kwargs) method described below.
@ -86,6 +86,13 @@ assert album == album2
!!!note !!!note
Note that if you want to create a new object you either have to pass pk column value or pk column has to be set as autoincrement Note that if you want to create a new object you either have to pass pk column value or pk column has to be set as autoincrement
### first
`first(): -> Model`
Gets the first row from the db ordered by primary key column ascending.
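For example (a minimal sketch reusing the `Track` model from the other snippets):
```python
# fetches the row with the smallest primary key value
track = await Track.objects.first()
```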
### update ### update
`update(each: bool = False, **kwargs) -> int` `update(each: bool = False, **kwargs) -> int`
@ -447,9 +454,12 @@ any attribute it will be updated on all parents as they share the same child obj
### limit ### limit
`limit(limit_count: int) -> QuerySet` `limit(limit_count: int, limit_raw_sql: bool = None) -> QuerySet`
You can limit the results to desired number of rows. You can limit the results to desired number of parent models.
To limit the actual number of database query rows instead of number of main models
use the `limit_raw_sql` parameter flag, and set it to `True`.
```python ```python
tracks = await Track.objects.limit(1).all() tracks = await Track.objects.limit(1).all()
@ -465,9 +475,12 @@ tracks = await Track.objects.limit(1).all()
### offset ### offset
`offset(offset: int) -> QuerySet` `offset(offset: int, limit_raw_sql: bool = None) -> QuerySet`
You can also offset the results by desired number of rows. You can also offset the results by desired number of main models.
To offset the actual number of database query rows instead of number of main models
use the `limit_raw_sql` parameter flag, and set it to `True`.
```python ```python
tracks = await Track.objects.offset(1).limit(1).all() tracks = await Track.objects.offset(1).limit(1).all()
@ -701,5 +714,5 @@ assert owner.toys[1].name == "Toy 1"
Something like `Track.object.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()` Something like `Track.object.select_related("album").filter(album__name="Malibu").offset(1).limit(1).all()`
[models]: ./models.md [models]: ./models/index.md
[relations]: ./relations.md [relations]: ./relations/index.md

View File

@ -1,430 +0,0 @@
# Relations
## Defining a relationship
### ForeignKey
`ForeignKey(to, related_name=None)` has required parameters `to` that takes target `Model` class.
Sqlalchemy column and Type are automatically taken from target `Model`.
* Sqlalchemy column: class of a target `Model` primary key column
* Type (used for pydantic): type of a target `Model`
#### Defining Models
To define a relation add `ForeignKey` field that points to related `Model`.
```Python hl_lines="29"
--8<-- "../docs_src/fields/docs003.py"
```
#### Reverse Relation
`ForeignKey` fields are automatically registering reverse side of the relation.
By default it's child (source) `Model` name + s, like courses in snippet below:
```Python hl_lines="29 35"
--8<-- "../docs_src/fields/docs001.py"
```
Reverse relation exposes API to manage related objects also from parent side.
##### add
Adding child model from parent side causes adding related model to currently loaded parent relation,
as well as sets child's model foreign key value and updates the model.
```python
department = await Department(name="Science").save()
course = Course(name="Math", completed=False) # note - not saved
await department.courses.add(course)
assert course.pk is not None # child model was saved
# relation on child model is set and FK column saved in db
assert course.department == department
# relation on parent model is also set
assert department.courses[0] == course
```
!!!warning
If you want to add child model on related model the primary key value for parent model **has to exist in database**.
Otherwise ormar will raise RelationshipInstanceError as it cannot set child's ForeignKey column value
if parent model has no primary key value.
That means that in example above the department has to be saved before you can call `department.courses.add()`.
##### remove
Removal of the related model one by one.
In reverse relation calling `remove()` does not remove the child model, but instead nulls its ForeignKey value.
```python
# continuing from above
await department.courses.remove(course)
assert len(department.courses) == 0
# course still exists and was saved in remove
assert course.pk is not None
assert course.department is None
# to remove child from db
await course.delete()
```
But if you want to clear the relation and delete the child at the same time you can issue:
```python
# this will not only clear the relation
# but also delete related course from db
await department.courses.remove(course, keep_reversed=False)
```
##### clear
Removal of all related models in one call.
Like `remove()`, by default `clear()` nulls the ForeignKey column on the child models (all of them, no matter if they are loaded or not).
```python
# nulls department column on all courses related to this department
await department.courses.clear()
```
If you want to remove the children altogether from the database, set `keep_reversed=False`
```python
# deletes from db all courses related to this department
await department.courses.clear(keep_reversed=False)
```
##### QuerysetProxy
Reverse relation exposes QuerysetProxy API that allows you to query related model like you would issue a normal Query.
To read which methods of QuerySet are available read below [querysetproxy][querysetproxy]
#### related_name
But you can overwrite this name by providing `related_name` parameter like below:
```Python hl_lines="29 35"
--8<-- "../docs_src/fields/docs002.py"
```
!!!tip
The reverse relation on access returns a list of `weakref.proxy` objects to avoid circular references.
### Relation Setup
You have several ways to set-up a relationship connection.
#### `Model` instance
The most obvious one is to pass a related `Model` instance to the constructor.
```Python hl_lines="34-35"
--8<-- "../docs_src/relations/docs001.py"
```
#### Primary key value
You can setup the relation also with just the pk column value of the related model.
```Python hl_lines="37-38"
--8<-- "../docs_src/relations/docs001.py"
```
#### Dictionary
Next option is with a dictionary of key-values of the related model.
You can build the dictionary yourself or get it from existing model with `dict()` method.
```Python hl_lines="40-41"
--8<-- "../docs_src/relations/docs001.py"
```
#### None
Finally you can explicitly set it to None (default behavior if no value passed).
```Python hl_lines="43-44"
--8<-- "../docs_src/relations/docs001.py"
```
!!!warning
In all not None cases the primary key value for related model **has to exist in database**.
Otherwise an IntegrityError will be raised by your database driver library.
### ManyToMany
`ManyToMany(to, through)` has required parameters `to` and `through` that takes target and relation `Model` classes.
Sqlalchemy column and Type are automatically taken from target `Model`.
* Sqlalchemy column: class of a target `Model` primary key column
* Type (used for pydantic): type of a target `Model`
#### Defining `Models`
```Python
--8<-- "../docs_src/relations/docs002.py"
```
Create sample data:
```Python
guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum")
post = await Post.objects.create(title="Hello, M2M", author=guido)
news = await Category.objects.create(name="News")
```
#### add
```python
# Add a category to a post.
await post.categories.add(news)
# or from the other end:
await news.posts.add(post)
```
!!!warning
In all not None cases the primary key value for related model **has to exist in database**.
Otherwise an IntegrityError will be raised by your database driver library.
#### remove
Removal of the related model one by one.
Removes also the relation in the database.
```python
await news.posts.remove(post)
```
#### clear
Removal of all related models in one call.
Removes also the relation in the database.
```python
await news.posts.clear()
```
#### QuerysetProxy
Reverse relation exposes QuerysetProxy API that allows you to query related model like you would issue a normal Query.
To read which methods of QuerySet are available read below [querysetproxy][querysetproxy]
### QuerySetProxy
When accessed directly, the related `ManyToMany` field (as well as the reverse `ForeignKey`) returns the list of related models.
But at the same time it exposes a subset of the QuerySet API, so you can filter, create, select related etc. directly from the parent model.
!!!note
By default exposed QuerySet is already filtered to return only `Models` related to parent `Model`.
So if you issue `post.categories.all()` you will get all categories related to that post, not all in table.
!!!note
Note that when accessing QuerySet API methods through QuerysetProxy you don't
need to use `objects` attribute like in normal queries.
So note that it's `post.categories.all()` and **not** `post.categories.objects.all()`.
To learn more about available QuerySet methods visit [queries][queries]
!!!warning
Querying related models from ManyToMany cleans list of related models loaded on parent model:
Example: `post.categories.first()` will set post.categories to list of 1 related model -> the one returned by first()
Example 2: if post has 4 categories so `len(post.categories) == 4` calling `post.categories.limit(2).all()`
-> will load only 2 children and now `assert len(post.categories) == 2`
This happens for all QuerysetProxy methods returning data: `get`, `all` and `first` and in `get_or_create` if model already exists.
Note that value returned by `create` or created in `get_or_create` and `update_or_create`
if model does not exist will be added to relation list (not clearing it).
#### get
`get(**kwargs): -> Model`
To grab just one of related models filtered by name you can use `get(**kwargs)` method.
```python
# grab one category
assert news == await post.categories.get(name="News")
# note that method returns the category so you can grab this value
# but it also modifies list of related models in place
# so regardless of what was previously loaded on parent model
# now it has only one value -> just loaded with get() call
assert len(post.categories) == 1
assert post.categories[0] == news
```
!!!tip
Read more in queries documentation [get][get]
#### all
`all(**kwargs) -> List[Optional["Model"]]`
To get a list of related models use `all()` method.
Note that you can filter the queryset, select related, exclude fields etc. like in normal query.
```python
# with all Queryset methods - filtering, selecting columns, counting etc.
await news.posts.filter(title__contains="M2M").all()
await Category.objects.filter(posts__author=guido).get()
# columns models of many to many relation can be prefetched
news_posts = await news.posts.select_related("author").all()
assert news_posts[0].author == guido
```
!!!tip
Read more in queries documentation [all][all]
#### create
`create(**kwargs): -> Model`
Create related `Model` directly from parent `Model`.
The link table is automatically populated, as well as relation ids in the database.
```python
# Creating columns object from instance:
await post.categories.create(name="Tips")
assert len(await post.categories.all()) == 2
# newly created instance already have relation persisted in the database
```
!!!tip
Read more in queries documentation [create][create]
#### get_or_create
`get_or_create(**kwargs) -> Model`
!!!tip
Read more in queries documentation [get_or_create][get_or_create]
#### update_or_create
`update_or_create(**kwargs) -> Model`
!!!tip
Read more in queries documentation [update_or_create][update_or_create]
#### filter
`filter(**kwargs) -> QuerySet`
!!!tip
Read more in queries documentation [filter][filter]
#### exclude
`exclude(**kwargs) -> QuerySet`
!!!tip
Read more in queries documentation [exclude][exclude]
#### select_related
`select_related(related: Union[List, str]) -> QuerySet`
!!!tip
Read more in queries documentation [select_related][select_related]
#### prefetch_related
`prefetch_related(related: Union[List, str]) -> QuerySet`
!!!tip
Read more in queries documentation [prefetch_related][prefetch_related]
#### limit
`limit(limit_count: int) -> QuerySet`
!!!tip
Read more in queries documentation [limit][limit]
#### offset
`offset(offset: int) -> QuerySet`
!!!tip
Read more in queries documentation [offset][offset]
#### count
`count() -> int`
!!!tip
Read more in queries documentation [count][count]
#### exists
`exists() -> bool`
!!!tip
Read more in queries documentation [exists][exists]
#### fields
`fields(columns: Union[List, str, set, dict]) -> QuerySet`
!!!tip
Read more in queries documentation [fields][fields]
#### exclude_fields
`exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet`
!!!tip
Read more in queries documentation [exclude_fields][exclude_fields]
#### order_by
`order_by(columns:Union[List, str]) -> QuerySet`
!!!tip
Read more in queries documentation [order_by][order_by]
[queries]: ./queries.md
[querysetproxy]: ./relations.md#querysetproxy-methods
[get]: ./queries.md#get
[all]: ./queries.md#all
[create]: ./queries.md#create
[get_or_create]: ./queries.md#get_or_create
[update_or_create]: ./queries.md#update_or_create
[filter]: ./queries.md#filter
[exclude]: ./queries.md#exclude
[select_related]: ./queries.md#select_related
[prefetch_related]: ./queries.md#prefetch_related
[limit]: ./queries.md#limit
[offset]: ./queries.md#offset
[count]: ./queries.md#count
[exists]: ./queries.md#exists
[fields]: ./queries.md#fields
[exclude_fields]: ./queries.md#exclude_fields
[order_by]: ./queries.md#order_by

View File

@ -0,0 +1,181 @@
# ForeignKey
`ForeignKey(to, related_name=None)` has required parameters `to` that takes target `Model` class.
Sqlalchemy column and Type are automatically taken from target `Model`.
* Sqlalchemy column: class of a target `Model` primary key column
* Type (used for pydantic): type of a target `Model`
## Defining Models
To define a relation add `ForeignKey` field that points to related `Model`.
```Python hl_lines="29"
--8<-- "../docs_src/fields/docs003.py"
```
## Reverse Relation
`ForeignKey` fields are automatically registering reverse side of the relation.
By default it's child (source) `Model` name + s, like courses in snippet below:
```Python hl_lines="29 35"
--8<-- "../docs_src/fields/docs001.py"
```
Reverse relation exposes API to manage related objects also from parent side.
### add
Adding child model from parent side causes adding related model to currently loaded parent relation,
as well as sets child's model foreign key value and updates the model.
```python
department = await Department(name="Science").save()
course = Course(name="Math", completed=False) # note - not saved
await department.courses.add(course)
assert course.pk is not None # child model was saved
# relation on child model is set and FK column saved in db
assert course.department == department
# relation on parent model is also set
assert department.courses[0] == course
```
!!!warning
If you want to add child model on related model the primary key value for parent model **has to exist in database**.
Otherwise ormar will raise RelationshipInstanceError as it cannot set child's ForeignKey column value
if parent model has no primary key value.
That means that in example above the department has to be saved before you can call `department.courses.add()`.
### remove
Removal of the related model one by one.
In reverse relation calling `remove()` does not remove the child model, but instead nulls its ForeignKey value.
```python
# continuing from above
await department.courses.remove(course)
assert len(department.courses) == 0
# course still exists and was saved in remove
assert course.pk is not None
assert course.department is None
# to remove child from db
await course.delete()
```
But if you want to clear the relation and delete the child at the same time you can issue:
```python
# this will not only clear the relation
# but also delete related course from db
await department.courses.remove(course, keep_reversed=False)
```
### clear
Removal of all related models in one call.
Like `remove()`, by default `clear()` nulls the ForeignKey column on the child models (all of them, no matter if they are loaded or not).
```python
# nulls department column on all courses related to this department
await department.courses.clear()
```
If you want to remove the children altogether from the database, set `keep_reversed=False`
```python
# deletes from db all courses related to this department
await department.courses.clear(keep_reversed=False)
```
## QuerysetProxy
Reverse relation exposes QuerysetProxy API that allows you to query related model like you would issue a normal Query.
To read which methods of QuerySet are available read below [querysetproxy][querysetproxy]
## related_name
You can overwrite the default reverse relation name by providing the `related_name` parameter like below:
```Python hl_lines="29 35"
--8<-- "../docs_src/fields/docs002.py"
```
!!!tip
The reverse relation on access returns a list of `weakref.proxy` objects to avoid circular references.
!!!warning
When you provide multiple relations to the same model `ormar` can no longer auto generate
the `related_name` for you. Therefore, in that situation you **have to** provide `related_name`
for all but one (one can keep the default, generated name) or for all related fields, as in the sketch below.
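A minimal sketch of such a setup (the `Author`/`Book` models are made up for illustration; `database`, `metadata` and the `Optional` import from `typing` are assumed to be the same as in the snippets above):
```python
class Author(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Book(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=200)
    # the first relation can keep the auto generated related_name ("books")
    author: Optional[Author] = ormar.ForeignKey(Author)
    # every additional relation to Author needs an explicit, unique related_name
    co_author: Optional[Author] = ormar.ForeignKey(
        Author, related_name="co_authored_books"
    )
```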
## Relation Setup
You have several ways to set-up a relationship connection.
### `Model` instance
The most obvious one is to pass a related `Model` instance to the constructor.
```Python hl_lines="34-35"
--8<-- "../docs_src/relations/docs001.py"
```
### Primary key value
You can setup the relation also with just the pk column value of the related model.
```Python hl_lines="37-38"
--8<-- "../docs_src/relations/docs001.py"
```
### Dictionary
Next option is with a dictionary of key-values of the related model.
You can build the dictionary yourself or get it from existing model with `dict()` method.
```Python hl_lines="40-41"
--8<-- "../docs_src/relations/docs001.py"
```
### None
Finally you can explicitly set it to None (default behavior if no value passed).
```Python hl_lines="43-44"
--8<-- "../docs_src/relations/docs001.py"
```
!!!warning
In all not None cases the primary key value for related model **has to exist in database**.
Otherwise an IntegrityError will be raised by your database driver library.
[queries]: ./queries.md
[querysetproxy]: ./queryset-proxy.md
[get]: ./queries.md#get
[all]: ./queries.md#all
[create]: ./queries.md#create
[get_or_create]: ./queries.md#get_or_create
[update_or_create]: ./queries.md#update_or_create
[filter]: ./queries.md#filter
[exclude]: ./queries.md#exclude
[select_related]: ./queries.md#select_related
[prefetch_related]: ./queries.md#prefetch_related
[limit]: ./queries.md#limit
[offset]: ./queries.md#offset
[count]: ./queries.md#count
[exists]: ./queries.md#exists
[fields]: ./queries.md#fields
[exclude_fields]: ./queries.md#exclude_fields
[order_by]: ./queries.md#order_by

98
docs/relations/index.md Normal file
View File

@ -0,0 +1,98 @@
# Relations
Currently `ormar` supports two types of relations:
* One-to-many (and many-to-one) with `ForeignKey` field
* Many-to-many with `ManyToMany` field
Below you can find very basic examples of definitions for each of those relations.
To read more about methods, possibilities, definitions etc. please read the subsequent sections of the documentation.
## ForeignKey
To define many-to-one relation use `ForeignKey` field.
```Python hl_lines="17"
--8<-- "../docs_src/relations/docs003.py"
```
!!!tip
To read more about one-to-many relations visit [foreign-keys][foreign-keys] section
## Reverse ForeignKey
The definition of one-to-many relation also uses `ForeignKey`, and it's registered for you automatically.
So in relation to the example above:
```Python hl_lines="17"
class Department(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
# there is a virtual field here like follows
courses: Optional[List[Course]] = ormar.ForeignKey(Course, virtual=True)
# note that you DO NOT define it yourself, ormar does it for you.
```
!!!tip
To read more about many-to-one relations (e.g. changing the name of the generated field) visit the [foreign-keys][foreign-keys] section
!!!tip
Reverse ForeignKey allows you to query the related models with [queryset-proxy][queryset-proxy].
It allows you to use `await department.courses.all()` to fetch data related only to specific department etc.
## ManyToMany
To define many-to-many relation use `ManyToMany` field.
```python hl_lines="25-26"
class Category(ormar.Model):
class Meta:
tablename = "categories"
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=40)
# note: you need to specify through model
class PostCategory(ormar.Model):
class Meta:
tablename = "posts_categories"
database = database
metadata = metadata
class Post(ormar.Model):
class Meta:
tablename = "posts"
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
title: str = ormar.String(max_length=200)
categories: Optional[Union[Category, List[Category]]] = ormar.ManyToMany(
Category, through=PostCategory
)
```
!!!tip
To read more about many-to-many relations visit [many-to-many][many-to-many] section
!!!tip
ManyToMany allows you to query the related models with [queryset-proxy][queryset-proxy].
It allows you to use `await post.categories.all()` but also `await category.posts.all()` to fetch data related only to specific post, category etc.
[foreign-keys]: ./foreign-key.md
[many-to-many]: ./many-to-many.md
[queryset-proxy]: ./queryset-proxy.md

View File

@ -0,0 +1,97 @@
# ManyToMany
`ManyToMany(to, through)` has required parameters `to` and `through` that takes target and relation `Model` classes.
Sqlalchemy column and Type are automatically taken from target `Model`.
* Sqlalchemy column: class of a target `Model` primary key column
* Type (used for pydantic): type of a target `Model`
## Defining Models
```Python hl_lines="32 49-50"
--8<-- "../docs_src/relations/docs002.py"
```
Create sample data:
```Python
guido = await Author.objects.create(first_name="Guido", last_name="Van Rossum")
post = await Post.objects.create(title="Hello, M2M", author=guido)
news = await Category.objects.create(name="News")
```
### add
```python
# Add a category to a post.
await post.categories.add(news)
# or from the other end:
await news.posts.add(post)
```
!!!warning
In all not None cases the primary key value for related model **has to exist in database**.
Otherwise an IntegrityError will be raised by your database driver library.
### remove
Removal of the related model one by one.
Removes also the relation in the database.
```python
await news.posts.remove(post)
```
### clear
Removal of all related models in one call.
Removes also the relation in the database.
```python
await news.posts.clear()
```
### QuerysetProxy
Reverse relation exposes QuerysetProxy API that allows you to query related model like you would issue a normal Query.
To read which methods of QuerySet are available read below [querysetproxy][querysetproxy]
## related_name
By default, the related_name is generated in the same way as for the `ForeignKey` relation (class.name.lower()+'s'),
but in the same way you can overwrite this name by providing `related_name` parameter like below:
```Python
categories: Optional[Union[Category, List[Category]]] = ormar.ManyToMany(
Category, through=PostCategory, related_name="new_categories"
)
```
!!!warning
When you provide multiple relations to the same model `ormar` can no longer auto generate
the `related_name` for you. Therefore, in that situation you **have to** provide `related_name`
for all but one (one can keep the default, generated name) or for all related fields, as in the sketch below.
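A hypothetical sketch of such a case (`Category`, `PostCategory`, `database`, `metadata` and the typing imports are assumed to be the same as in the snippets above; `PostCategory2` is a made-up second through model):
```python
class PostCategory2(ormar.Model):
    class Meta:
        tablename = "posts_x_backup_categories"
        database = database
        metadata = metadata


class Post(ormar.Model):
    class Meta:
        tablename = "posts"
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=200)
    # the first relation can keep the auto generated related_name ("posts")
    categories: Optional[Union[Category, List[Category]]] = ormar.ManyToMany(
        Category, through=PostCategory
    )
    # any further relation to Category needs an explicit, unique related_name
    backup_categories: Optional[Union[Category, List[Category]]] = ormar.ManyToMany(
        Category, through=PostCategory2, related_name="backup_posts"
    )
```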
[queries]: ./queries.md
[querysetproxy]: ./queryset-proxy.md
[get]: ./queries.md#get
[all]: ./queries.md#all
[create]: ./queries.md#create
[get_or_create]: ./queries.md#get_or_create
[update_or_create]: ./queries.md#update_or_create
[filter]: ./queries.md#filter
[exclude]: ./queries.md#exclude
[select_related]: ./queries.md#select_related
[prefetch_related]: ./queries.md#prefetch_related
[limit]: ./queries.md#limit
[offset]: ./queries.md#offset
[count]: ./queries.md#count
[exists]: ./queries.md#exists
[fields]: ./queries.md#fields
[exclude_fields]: ./queries.md#exclude_fields
[order_by]: ./queries.md#order_by

View File

@ -0,0 +1,203 @@
# QuerySetProxy
When accessed directly, the related `ManyToMany` field (as well as the reverse `ForeignKey`) returns the list of related models.
But at the same time it exposes a subset of the QuerySet API, so you can filter, create, select related etc. directly from the parent model.
!!!note
By default exposed QuerySet is already filtered to return only `Models` related to parent `Model`.
So if you issue `post.categories.all()` you will get all categories related to that post, not all in table.
!!!note
Note that when accessing QuerySet API methods through QuerysetProxy you don't
need to use `objects` attribute like in normal queries.
So note that it's `post.categories.all()` and **not** `post.categories.objects.all()`.
To learn more about available QuerySet methods visit [queries][queries]
!!!warning
Querying related models from ManyToMany cleans list of related models loaded on parent model:
Example: `post.categories.first()` will set `post.categories` to a list of 1 related model -> the one returned by `first()`.
Example 2: if a post has 4 categories, so `len(post.categories) == 4`, calling `post.categories.limit(2).all()`
will load only 2 children and now `assert len(post.categories) == 2`.
This happens for all QuerysetProxy methods returning data: `get`, `all` and `first`, and in `get_or_create` if the model already exists.
Note that the value returned by `create`, or created in `get_or_create` and `update_or_create`
if the model does not exist, will be added to the relation list (without clearing it). A short sketch of this behavior is shown below.
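The sketch reuses the `post` and `news` objects created in the ManyToMany examples:
```python
await post.categories.add(news)
await post.categories.create(name="Tips")
assert len(post.categories) == 2      # both related categories are loaded on post

await post.categories.limit(1).all()  # fetching data through the proxy...
assert len(post.categories) == 1      # ...replaces the previously loaded list
```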
## get
`get(**kwargs): -> Model`
To grab just one of related models filtered by name you can use `get(**kwargs)` method.
```python
# grab one category
assert news == await post.categories.get(name="News")
# note that method returns the category so you can grab this value
# but it also modifies list of related models in place
# so regardless of what was previously loaded on parent model
# now it has only one value -> just loaded with get() call
assert len(post.categories) == 1
assert post.categories[0] == news
```
!!!tip
Read more in queries documentation [get][get]
## all
`all(**kwargs) -> List[Optional["Model"]]`
To get a list of related models use `all()` method.
Note that you can filter the queryset, select related, exclude fields etc. like in normal query.
```python
# with all Queryset methods - filtering, selecting columns, counting etc.
await news.posts.filter(title__contains="M2M").all()
await Category.objects.filter(posts__author=guido).get()
# columns models of many to many relation can be prefetched
news_posts = await news.posts.select_related("author").all()
assert news_posts[0].author == guido
```
!!!tip
Read more in queries documentation [all][all]
## create
`create(**kwargs): -> Model`
Create related `Model` directly from parent `Model`.
The link table is automatically populated, as well as relation ids in the database.
```python
# Create a related object directly from the parent instance:
await post.categories.create(name="Tips")
assert len(await post.categories.all()) == 2
# newly created instance already has the relation persisted in the database
```
!!!tip
Read more in queries documentation [create][create]
## get_or_create
`get_or_create(**kwargs) -> Model`
!!!tip
Read more in queries documentation [get_or_create][get_or_create]
## update_or_create
`update_or_create(**kwargs) -> Model`
!!!tip
Read more in queries documentation [update_or_create][update_or_create]
## filter
`filter(**kwargs) -> QuerySet`
!!!tip
Read more in queries documentation [filter][filter]
## exclude
`exclude(**kwargs) -> QuerySet`
!!!tip
Read more in queries documentation [exclude][exclude]
## select_related
`select_related(related: Union[List, str]) -> QuerySet`
!!!tip
Read more in queries documentation [select_related][select_related]
## prefetch_related
`prefetch_related(related: Union[List, str]) -> QuerySet`
!!!tip
Read more in queries documentation [prefetch_related][prefetch_related]
## limit
`limit(limit_count: int) -> QuerySet`
!!!tip
Read more in queries documentation [limit][limit]
## offset
`offset(offset: int) -> QuerySet`
!!!tip
Read more in queries documentation [offset][offset]
## count
`count() -> int`
!!!tip
Read more in queries documentation [count][count]
## exists
`exists() -> bool`
!!!tip
Read more in queries documentation [exists][exists]
## fields
`fields(columns: Union[List, str, set, dict]) -> QuerySet`
!!!tip
Read more in queries documentation [fields][fields]
## exclude_fields
`exclude_fields(columns: Union[List, str, set, dict]) -> QuerySet`
!!!tip
Read more in queries documentation [exclude_fields][exclude_fields]
## order_by
`order_by(columns:Union[List, str]) -> QuerySet`
!!!tip
Read more in queries documentation [order_by][order_by]
[queries]: ../queries.md
[get]: ../queries.md#get
[all]: ../queries.md#all
[create]: ../queries.md#create
[get_or_create]: ../queries.md#get_or_create
[update_or_create]: ../queries.md#update_or_create
[filter]: ../queries.md#filter
[exclude]: ../queries.md#exclude
[select_related]: ../queries.md#select_related
[prefetch_related]: ../queries.md#prefetch_related
[limit]: ../queries.md#limit
[offset]: ../queries.md#offset
[count]: ../queries.md#count
[exists]: ../queries.md#exists
[fields]: ../queries.md#fields
[exclude_fields]: ../queries.md#exclude_fields
[order_by]: ../queries.md#order_by

View File

@ -1,3 +1,35 @@
# 0.8.0
## Breaking
* **Breaking:** `remove()` parent from child side in reverse ForeignKey relation now requires passing a relation `name`,
as the same model can be registered multiple times and `ormar` needs to know from which relation on the parent you want to remove the child.
* **Breaking:** applying `limit` and `offset` with `select_related` is by default applied only on the main table before the join -> meaning that it's not the total
number of rows that is limited but just the number of main models (the first one in the query, the one used to construct it). You can still limit all rows of the db response with the `limit_raw_sql=True` flag on either `limit` or `offset` (or both)
* **Breaking:** issuing `first()` now fetches the first row ordered by the primary key asc (so the first one inserted (can be different for non-numeric primary keys - e.g. alphabetical order of strings))
* **Breaking:** issuing `get()` **without any filters** now fetches the first row ordered by the primary key desc (so it should be the last one inserted (can be different for non-numeric primary keys - e.g. alphabetical order of strings))
* **Breaking (internal):** sqlalchemy columns kept at `Meta.columns` are no longer bind to table, so you cannot get the column straight from there
## Features
* Introduce **inheritance**. For now two types of inheritance are possible:
* **Mixins** - don't subclass `ormar.Model`, just define fields that are later used on different models (like `created_date` and `updated_date` on each child model), only actual models create tables, but those fields from mixins are added
* **Concrete table inheritance** - means that parent is marked as `abstract=True` in Meta class and each child has its own table with columns from the parent and own child columns, kind of similar to Mixins but parent also is a (an abstract) Model
* To read more check the docs on models -> inheritance section.
* QuerySet `first()` can be used with `prefetch_related`
## Fixes
* Fix minor bug in `order_by` for primary model order bys
* Fix in `prefetch_query` for multiple related_names for the same model.
* Fix using same `related_name` on different models leading to the same related `Model` overwriting each other, now `ModelDefinitionError` is raised and you need to change the name.
* Fix `order_by` overwriting conditions when multiple joins to the same table applied.
## Docs
* Split and cleanup in docs:
* Divide models section into subsections
* Divide relations section into subsections
* Divide fields section into subsections
* Add model inheritance section
* Add API (BETA) documentation
# 0.7.5 # 0.7.5
* Fix for wrong relation column name in many_to_many relation joins (fix [#73][#73]) * Fix for wrong relation column name in many_to_many relation joins (fix [#73][#73])

View File

@ -0,0 +1,17 @@
class Department(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
class Course(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
department: Optional[Union[Department, Dict]] = ormar.ForeignKey(Department)

View File

@ -3,9 +3,20 @@ site_description: A simple async ORM with fastapi in mind and pydantic validatio
nav: nav:
- Overview: index.md - Overview: index.md
- Installation: install.md - Installation: install.md
- Models: models.md - Models:
- Fields: fields.md - Definition: models/index.md
- Relations: relations.md - Inheritance: models/inheritance.md
- Methods: models/methods.md
- Migrations: models/migrations.md
- Internals: models/internals.md
- Fields:
- Common parameters: fields/common-parameters.md
- Fields types: fields/field-types.md
- Relations:
- relations/index.md
- relations/foreign-key.md
- relations/many-to-many.md
- relations/queryset-proxy.md
- Queries: queries.md - Queries: queries.md
- Signals: signals.md - Signals: signals.md
- Use with Fastapi: fastapi.md - Use with Fastapi: fastapi.md
@ -13,6 +24,53 @@ nav:
- PyCharm plugin: plugin.md - PyCharm plugin: plugin.md
- Contributing: contributing.md - Contributing: contributing.md
- Release Notes: releases.md - Release Notes: releases.md
- Api (BETA):
- Index: api/index.md
- Models:
- Helpers:
- api/models/helpers/models.md
- api/models/helpers/pydantic.md
- api/models/helpers/relations.md
- api/models/helpers/sqlalchemy.md
- Mixins:
- Alias Mixin: api/models/mixins/alias-mixin.md
- Excludable Mixin: api/models/mixins/excludable-mixin.md
- Merge Model Mixin: api/models/mixins/merge-model-mixin.md
- Prefetch Query Mixin: api/models/mixins/prefetch-query-mixin.md
- Relation Mixin: api/models/mixins/relation-mixin.md
- Save Prepare Mixin: api/models/mixins/save-prepare-mixin.md
- api/models/model.md
- New BaseModel: api/models/new-basemodel.md
- Model Table Proxy: api/models/model-table-proxy.md
- Model Metaclass: api/models/model-metaclass.md
- Fields:
- Base Field: api/fields/base-field.md
- Model Fields: api/fields/model-fields.md
- Foreign Key: api/fields/foreign-key.md
- Many To Many: api/fields/many-to-many.md
- api/fields/decorators.md
- Query Set:
- Query Set: api/query-set/query-set.md
- api/query-set/query.md
- Prefetch Query: api/query-set/prefetch-query.md
- api/query-set/join.md
- api/query-set/clause.md
- Filter Query: api/query-set/filter-query.md
- Order Query: api/query-set/order-query.md
- Limit Query: api/query-set/limit-query.md
- Offset Query: api/query-set/offset-query.md
- api/query-set/utils.md
- Relations:
- Relation Manager: api/relations/relation-manager.md
- api/relations/relation.md
- Relation Proxy: api/relations/relation-proxy.md
- Queryset Proxy: api/relations/queryset-proxy.md
- Alias Manager: api/relations/alias-manager.md
- api/relations/utils.md
- Signals:
- api/signals/signal.md
- api/signals/decorators.md
- Exceptions: api/exceptions.md
repo_name: collerek/ormar repo_name: collerek/ormar
repo_url: https://github.com/collerek/ormar repo_url: https://github.com/collerek/ormar
google_analytics: google_analytics:

View File

@ -1,3 +1,24 @@
"""
The `ormar` package is an async mini ORM for Python, with support for **Postgres,
MySQL**, and **SQLite**.
The main benefits of using `ormar` are:
* getting an **async ORM that can be used with async frameworks**
(fastapi, starlette etc.)
* getting just **one model to maintain** - you don't have to maintain a pydantic
model and a separate orm model (sqlalchemy, peewee, gino etc.)
The goal was to create a simple ORM that can be **used directly
(as request and response models)
with `fastapi`** that bases its data validation on pydantic.
Ormar - apart from the obvious ORM in the name - gets its name from ormar in Swedish, which means
snakes, and ormar(e) in Italian, which means cabinet.
And what's a better name for a python ORM than snakes cabinet :)
"""
from ormar.decorators import ( from ormar.decorators import (
post_delete, post_delete,
post_save, post_save,
@ -44,7 +65,7 @@ class UndefinedType: # pragma no cover
Undefined = UndefinedType() Undefined = UndefinedType()
__version__ = "0.7.5" __version__ = "0.8.0"
__all__ = [ __all__ = [
"Integer", "Integer",
"BigInteger", "BigInteger",

View File

@ -2,6 +2,7 @@
Module with all decorators that are exposed for users. Module with all decorators that are exposed for users.
Currently only: Currently only:
* property_field - exposing @property like function as field in Model.dict() * property_field - exposing @property like function as field in Model.dict()
* predefined signals decorators (pre/post + save/update/delete) * predefined signals decorators (pre/post + save/update/delete)

View File

@ -13,7 +13,7 @@ def property_field(func: Callable) -> Union[property, Callable]:
mypy validation will complain about this. mypy validation will complain about this.
Note that "fields" exposed like this do not go through validation. Note that "fields" exposed like this do not go through validation.
:raises: ModelDefinitionError if method has any other argument than self. :raises ModelDefinitionError: if method has any other argument than self.
:param func: decorated function to be exposed :param func: decorated function to be exposed
:type func: Callable :type func: Callable
:return: decorated function passed in func param, with set __property_field__ = True :return: decorated function passed in func param, with set __property_field__ = True

View File

@ -1,3 +1,8 @@
"""
Gathers all exceptions thrown by ormar.
"""
class AsyncOrmException(Exception): class AsyncOrmException(Exception):
""" """
Base ormar Exception Base ormar Exception
@ -8,7 +13,8 @@ class AsyncOrmException(Exception):
class ModelDefinitionError(AsyncOrmException): class ModelDefinitionError(AsyncOrmException):
""" """
Raised for errors related to the model definition itself. Raised for errors related to the model definition itself:
* setting @property_field on method with arguments other than func(self) * setting @property_field on method with arguments other than func(self)
* defining a Field without required parameters * defining a Field without required parameters
* defining a model with more than one primary_key * defining a model with more than one primary_key
@ -46,7 +52,8 @@ class MultipleMatches(AsyncOrmException):
class QueryDefinitionError(AsyncOrmException): class QueryDefinitionError(AsyncOrmException):
""" """
Raised for errors in query definition. Raised for errors in query definition:
* using contains or icontains filter with instance of the Model * using contains or icontains filter with instance of the Model
* using Queryset.update() without filter and setting each flag to True * using Queryset.update() without filter and setting each flag to True
* using Queryset.delete() without filter and setting each flag to True * using Queryset.delete() without filter and setting each flag to True

View File

@ -197,6 +197,23 @@ class BaseField(FieldInfo):
return cls.autoincrement return cls.autoincrement
return False return False
@classmethod
def construct_constraints(cls) -> List:
"""
Converts list of ormar constraints into sqlalchemy ForeignKeys.
Has to be done dynamically as sqlalchemy binds ForeignKey to the table.
And we need a new ForeignKey for subclasses of current model
:return: List of sqlalchemy foreign keys - by default one.
:rtype: List[sqlalchemy.schema.ForeignKey]
"""
return [
sqlalchemy.schema.ForeignKey(
con.name, ondelete=con.ondelete, onupdate=con.onupdate
)
for con in cls.constraints
]
@classmethod @classmethod
def get_column(cls, name: str) -> sqlalchemy.Column: def get_column(cls, name: str) -> sqlalchemy.Column:
""" """
@ -212,7 +229,7 @@ class BaseField(FieldInfo):
return sqlalchemy.Column( return sqlalchemy.Column(
cls.alias or name, cls.alias or name,
cls.column_type, cls.column_type,
*cls.constraints, *cls.construct_constraints(),
primary_key=cls.primary_key, primary_key=cls.primary_key,
nullable=cls.nullable and not cls.primary_key, nullable=cls.nullable and not cls.primary_key,
index=cls.index, index=cls.index,

View File

@ -1,7 +1,7 @@
import uuid import uuid
from dataclasses import dataclass
from typing import Any, List, Optional, TYPE_CHECKING, Type, Union from typing import Any, List, Optional, TYPE_CHECKING, Type, Union
import sqlalchemy
from pydantic import BaseModel, create_model from pydantic import BaseModel, create_model
from sqlalchemy import UniqueConstraint from sqlalchemy import UniqueConstraint
@ -15,6 +15,24 @@ if TYPE_CHECKING: # pragma no cover
def create_dummy_instance(fk: Type["Model"], pk: Any = None) -> "Model": def create_dummy_instance(fk: Type["Model"], pk: Any = None) -> "Model":
"""
Ormar never returns raw data.
So if you have a related field that has a value populated
it will construct a Model instance out of it for you.
Creates a "fake" instance of passed Model from pk value.
The instantiated Model has only pk value filled.
To achieve this __pk_only__ flag has to be passed as it skips the validation.
If the nested related Models are required they are set with -1 as pk value.
:param fk: class of the related Model to which instance should be constructed
:type fk: Model class
:param pk: value of the primary_key column
:type pk: Any
:return: Model instance populated with only pk
:rtype: Model
"""
init_dict = { init_dict = {
**{fk.Meta.pkname: pk or -1, "__pk_only__": True}, **{fk.Meta.pkname: pk or -1, "__pk_only__": True},
**{ **{
@ -30,6 +48,17 @@ def create_dummy_model(
base_model: Type["Model"], base_model: Type["Model"],
pk_field: Type[Union[BaseField, "ForeignKeyField", "ManyToManyField"]], pk_field: Type[Union[BaseField, "ForeignKeyField", "ManyToManyField"]],
) -> Type["BaseModel"]: ) -> Type["BaseModel"]:
"""
Used to construct a dummy pydantic model for type hints and pydantic validation.
Populates only pk field and set it to desired type.
:param base_model: class of target dummy model
:type base_model: Model class
:param pk_field: ormar Field to be set on pydantic Model
:type pk_field: Type[Union[BaseField, "ForeignKeyField", "ManyToManyField"]]
:return: constructed dummy model
:rtype: pydantic.BaseModel
"""
fields = {f"{pk_field.name}": (pk_field.__type__, None)} fields = {f"{pk_field.name}": (pk_field.__type__, None)}
dummy_model = create_model( dummy_model = create_model(
f"PkOnly{base_model.get_name(lower=False)}", **fields # type: ignore f"PkOnly{base_model.get_name(lower=False)}", **fields # type: ignore
@ -38,7 +67,22 @@ def create_dummy_model(
class UniqueColumns(UniqueConstraint): class UniqueColumns(UniqueConstraint):
pass """
Subclass of sqlalchemy.UniqueConstraint.
Used to avoid importing anything from sqlalchemy by user.
"""
@dataclass
class ForeignKeyConstraint:
"""
Internal container to store ForeignKey definitions used later
to produce sqlalchemy.ForeignKeys
"""
name: str
ondelete: str
onupdate: str
def ForeignKey( # noqa CFQ002 def ForeignKey( # noqa CFQ002
@ -53,6 +97,36 @@ def ForeignKey( # noqa CFQ002
ondelete: str = None, ondelete: str = None,
**kwargs: Any, **kwargs: Any,
) -> Any: ) -> Any:
"""
Despite its name it's a function that returns a constructed ForeignKeyField.
This function is actually used in model declaration (as ormar.ForeignKey(ToModel)).
Accepts a number of relation-setting parameters as well as all BaseField ones.
:param to: target related ormar Model
:type to: Model class
:param name: name of the database field - later called alias
:type name: str
:param unique: parameter passed to sqlalchemy.ForeignKey, unique flag
:type unique: bool
:param nullable: marks field as optional/ required
:type nullable: bool
:param related_name: name of the reversed FK relation populated for you on the 'to' model
:type related_name: str
:param virtual: marks if relation is virtual.
It is for reversed FK and auto generated FK on through model in Many2Many relations.
:type virtual: bool
:param onupdate: parameter passed to sqlalchemy.ForeignKey.
How to treat child rows on update of parent (the one where FK is defined) model.
:type onupdate: str
:param ondelete: parameter passed to sqlalchemy.ForeignKey.
How to treat child rows on delete of parent (the one where FK is defined) model.
:type ondelete: str
:param kwargs: all other args to be populated by BaseField
:type kwargs: Any
:return: ormar ForeignKeyField with relation to selected model
:rtype: ForeignKeyField
"""
fk_string = to.Meta.tablename + "." + to.get_column_alias(to.Meta.pkname) fk_string = to.Meta.tablename + "." + to.get_column_alias(to.Meta.pkname)
to_field = to.Meta.model_fields[to.Meta.pkname] to_field = to.Meta.model_fields[to.Meta.pkname]
pk_only_model = create_dummy_model(to, to_field) pk_only_model = create_dummy_model(to, to_field)
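For context, a minimal declaration sketch using this function (Author/Book and the sqlite URL are illustrative names, not taken from this PR):

```python
import databases
import sqlalchemy
import ormar

database = databases.Database("sqlite:///db.sqlite")
metadata = sqlalchemy.MetaData()


class Author(ormar.Model):
    class Meta:
        tablename = "authors"
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Book(ormar.Model):
    class Meta:
        tablename = "books"
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=200)
    # reverse side will be reachable as Author.books thanks to related_name
    author: Author = ormar.ForeignKey(
        Author, related_name="books", ondelete="CASCADE", nullable=False
    )
```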
@ -68,8 +142,8 @@ def ForeignKey( # noqa CFQ002
name=kwargs.pop("real_name", None), name=kwargs.pop("real_name", None),
nullable=nullable, nullable=nullable,
constraints=[ constraints=[
sqlalchemy.schema.ForeignKey( ForeignKeyConstraint(
fk_string, ondelete=ondelete, onupdate=onupdate name=fk_string, ondelete=ondelete, onupdate=onupdate # type: ignore
) )
], ],
unique=unique, unique=unique,
@ -87,6 +161,10 @@ def ForeignKey( # noqa CFQ002
class ForeignKeyField(BaseField): class ForeignKeyField(BaseField):
"""
Actual class returned from ForeignKey function call and stored in model_fields.
"""
to: Type["Model"] to: Type["Model"]
name: str name: str
related_name: str related_name: str
@ -96,6 +174,21 @@ class ForeignKeyField(BaseField):
def _extract_model_from_sequence( def _extract_model_from_sequence(
cls, value: List, child: "Model", to_register: bool, relation_name: str cls, value: List, child: "Model", to_register: bool, relation_name: str
) -> List["Model"]: ) -> List["Model"]:
"""
Takes a list of Models and registers them on the parent.
Registration is mutual, so children also have a reference to the parent.
Used in reverse FK relations.
:param value: list of Model
:type value: List
:param child: child/ related Model
:type child: Model
:param to_register: flag if the relation should be set in RelationshipManager
:type to_register: bool
:return: list (if needed) registered Models
:rtype: List["Model"]
"""
return [ return [
cls.expand_relationship( # type: ignore cls.expand_relationship( # type: ignore
value=val, value=val,
@ -110,6 +203,21 @@ class ForeignKeyField(BaseField):
def _register_existing_model( def _register_existing_model(
cls, value: "Model", child: "Model", to_register: bool, relation_name: str cls, value: "Model", child: "Model", to_register: bool, relation_name: str
) -> "Model": ) -> "Model":
"""
Takes an already created instance and registers it for the parent.
Registration is mutual, so children also have a reference to the parent.
Used in reverse FK relations and normal FK for single models.
:param value: already instantiated Model
:type value: Model
:param child: child/ related Model
:type child: Model
:param to_register: flag if the relation should be set in RelationshipManager
:type to_register: bool
:return: (if needed) registered Model
:rtype: Model
"""
if to_register: if to_register:
cls.register_relation(model=value, child=child, relation_name=relation_name) cls.register_relation(model=value, child=child, relation_name=relation_name)
return value return value
@ -118,6 +226,22 @@ class ForeignKeyField(BaseField):
def _construct_model_from_dict( def _construct_model_from_dict(
cls, value: dict, child: "Model", to_register: bool, relation_name: str cls, value: dict, child: "Model", to_register: bool, relation_name: str
) -> "Model": ) -> "Model":
"""
Takes a dictionary, creates an instance and registers it for the parent.
If the dictionary contains only one field and it's the pk, it is a __pk_only__ model.
Registration is mutual, so children also have a reference to the parent.
Used in normal FK for dictionaries.
:param value: dictionary of a Model
:type value: dict
:param child: child/ related Model
:type child: Model
:param to_register: flag if the relation should be set in RelationshipManager
:type to_register: bool
:return: (if needed) registered Model
:rtype: Model
"""
if len(value.keys()) == 1 and list(value.keys())[0] == cls.to.Meta.pkname: if len(value.keys()) == 1 and list(value.keys())[0] == cls.to.Meta.pkname:
value["__pk_only__"] = True value["__pk_only__"] = True
model = cls.to(**value) model = cls.to(**value)
@ -129,6 +253,21 @@ class ForeignKeyField(BaseField):
def _construct_model_from_pk( def _construct_model_from_pk(
cls, value: Any, child: "Model", to_register: bool, relation_name: str cls, value: Any, child: "Model", to_register: bool, relation_name: str
) -> "Model": ) -> "Model":
"""
Takes a pk value, creates a dummy instance and registers it for the parent.
Registration is mutual, so children also have a reference to the parent.
Used in normal FK when only a pk value is passed.
:param value: value of a related pk / fk column
:type value: Any
:param child: child/ related Model
:type child: Model
:param to_register: flag if the relation should be set in RelationshipManager
:type to_register: bool
:return: (if needed) registered Model
:rtype: Model
"""
if cls.to.pk_type() == uuid.UUID and isinstance(value, str): if cls.to.pk_type() == uuid.UUID and isinstance(value, str):
value = uuid.UUID(value) value = uuid.UUID(value)
if not isinstance(value, cls.to.pk_type()): if not isinstance(value, cls.to.pk_type()):
@ -146,6 +285,18 @@ class ForeignKeyField(BaseField):
def register_relation( def register_relation(
cls, model: "Model", child: "Model", relation_name: str cls, model: "Model", child: "Model", relation_name: str
) -> None: ) -> None:
"""
Registers relation between parent and child in relation manager.
The relation manager is kept on each model (a different instance per model).
Used in the Metaclass, and sometimes some relations are missing
(i.e. cloned Models in fastapi might miss one).
:param model: parent model (with relation definition)
:type model: Model class
:param child: child model
:type child: Model class
"""
model._orm.add( model._orm.add(
parent=model, parent=model,
child=child, child=child,
@ -162,6 +313,23 @@ class ForeignKeyField(BaseField):
to_register: bool = True, to_register: bool = True,
relation_name: str = None, relation_name: str = None,
) -> Optional[Union["Model", List["Model"]]]: ) -> Optional[Union["Model", List["Model"]]]:
"""
For relations the child model is first constructed (if needed),
registered in relation and returned.
For relation fields the value can be a pk value (Any type of field),
dict (from Model) or actual instance/list of a "Model".
Selects the appropriate constructor based on a passed value.
:param value: a Model field value, returned untouched for non relation fields.
:type value: Any
:param child: a child Model to register
:type child: Union["Model", "NewBaseModel"]
:param to_register: flag if the relation should be set in RelationshipManager
:type to_register: bool
:return: returns a Model or a list of Models
:rtype: Optional[Union["Model", List["Model"]]]
"""
if value is None: if value is None:
return None if not cls.virtual else [] return None if not cls.virtual else []
constructors = { constructors = {
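A sketch of the dispatch idea only (the real mapping lives in the `constructors` dict above; the function below is an assumed simplification, not the ormar code):

```python
def pick_constructor(value):
    # choose how to build/register the related Model based on the value type
    if isinstance(value, list):
        return "_extract_model_from_sequence"   # list of Models / dicts
    if isinstance(value, dict):
        return "_construct_model_from_dict"     # raw dict from a Model
    if hasattr(value, "Meta"):
        return "_register_existing_model"       # already an ormar Model
    return "_construct_model_from_pk"           # plain pk value
```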

View File

@ -19,6 +19,29 @@ def ManyToMany(
virtual: bool = False, virtual: bool = False,
**kwargs: Any **kwargs: Any
) -> Any: ) -> Any:
"""
Despite its name it's a function that returns a constructed ManyToManyField.
This function is actually used in model declaration
(as ormar.ManyToMany(ToModel, through=ThroughModel)).
Accepts a number of relation-setting parameters as well as all BaseField ones.
:param to: target related ormar Model
:type to: Model class
:param through: through model for m2m relation
:type through: Model class
:param name: name of the database field - later called alias
:type name: str
:param unique: parameter passed to sqlalchemy.ForeignKey, unique flag
:type unique: bool
:param virtual: marks if relation is virtual.
It is for reversed FK and auto generated FK on through model in Many2Many relations.
:type virtual: bool
:param kwargs: all other args to be populated by BaseField
:type kwargs: Any
:return: ormar ManyToManyField with m2m relation to selected model
:rtype: ManyToManyField
"""
to_field = to.Meta.model_fields[to.Meta.pkname] to_field = to.Meta.model_fields[to.Meta.pkname]
related_name = kwargs.pop("related_name", None) related_name = kwargs.pop("related_name", None)
nullable = kwargs.pop("nullable", True) nullable = kwargs.pop("nullable", True)
@ -49,4 +72,17 @@ def ManyToMany(
class ManyToManyField(ForeignKeyField, ormar.QuerySetProtocol, ormar.RelationProtocol): class ManyToManyField(ForeignKeyField, ormar.QuerySetProtocol, ormar.RelationProtocol):
"""
Actual class returned from ManyToMany function call and stored in model_fields.
"""
through: Type["Model"] through: Type["Model"]
@classmethod
def default_target_field_name(cls) -> str:
"""
Returns default target model name on through model.
:return: name of the field
:rtype: str
"""
return cls.to.get_name()

View File

@ -17,6 +17,20 @@ def is_field_nullable(
server_default: Any, server_default: Any,
pydantic_only: Optional[bool], pydantic_only: Optional[bool],
) -> bool: ) -> bool:
"""
Checks if the given field should be nullable/ optional based on parameters given.
:param nullable: flag explicit setting a column as nullable
:type nullable: Optional[bool]
:param default: value or function to be called as default in python
:type default: Any
:param server_default: function to be called as default by sql server
:type server_default: Any
:param pydantic_only: flag if fields should not be included in the sql table
:type pydantic_only: Optional[bool]
:return: result of the check
:rtype: bool
"""
if nullable is None: if nullable is None:
return ( return (
default is not None default is not None
@ -27,10 +41,24 @@ def is_field_nullable(
def is_auto_primary_key(primary_key: bool, autoincrement: bool) -> bool: def is_auto_primary_key(primary_key: bool, autoincrement: bool) -> bool:
"""
Checks if field is an autoincrement pk -> if yes it's optional.
:param primary_key: flag if field is a pk field
:type primary_key: bool
:param autoincrement: flag if field should be autoincrement
:type autoincrement: bool
:return: result of the check
:rtype: bool
"""
return primary_key and autoincrement return primary_key and autoincrement
class ModelFieldFactory: class ModelFieldFactory:
"""
Default field factory that constructs Field classes and populates their values.
"""
_bases: Any = (BaseField,) _bases: Any = (BaseField,)
_type: Any = None _type: Any = None
@ -66,14 +94,31 @@ class ModelFieldFactory:
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: # pragma no cover def get_column_type(cls, **kwargs: Any) -> Any: # pragma no cover
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return None return None
@classmethod @classmethod
def validate(cls, **kwargs: Any) -> None: # pragma no cover def validate(cls, **kwargs: Any) -> None: # pragma no cover
pass """
Used to validate if all required parameters on a given field type are set.
:param kwargs: all params passed during construction
:type kwargs: Any
"""
class String(ModelFieldFactory, str): class String(ModelFieldFactory, str):
"""
String field factory that constructs Field classes and populates their values.
"""
_type = str _type = str
def __new__( # type: ignore # noqa CFQ002 def __new__( # type: ignore # noqa CFQ002
@ -100,10 +145,24 @@ class String(ModelFieldFactory, str):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.String(length=kwargs.get("max_length")) return sqlalchemy.String(length=kwargs.get("max_length"))
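A quick sketch of the factory contract (assuming `ormar` is importable; the assert is illustrative only):

```python
import sqlalchemy
import ormar

name_field = ormar.String(max_length=100)          # validated, max_length is required
column_type = ormar.String.get_column_type(max_length=100)

assert isinstance(column_type, sqlalchemy.String)  # -> sqlalchemy.String(length=100)
```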
@classmethod @classmethod
def validate(cls, **kwargs: Any) -> None: def validate(cls, **kwargs: Any) -> None:
"""
Used to validate if all required parameters on a given field type are set.
:param kwargs: all params passed during construction
:type kwargs: Any
"""
max_length = kwargs.get("max_length", None) max_length = kwargs.get("max_length", None)
if max_length is None or max_length <= 0: if max_length is None or max_length <= 0:
raise ModelDefinitionError( raise ModelDefinitionError(
@ -112,6 +171,10 @@ class String(ModelFieldFactory, str):
class Integer(ModelFieldFactory, int): class Integer(ModelFieldFactory, int):
"""
Integer field factory that constructs Field classes and populates their values.
"""
_type = int _type = int
def __new__( # type: ignore def __new__( # type: ignore
@ -142,10 +205,23 @@ class Integer(ModelFieldFactory, int):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Integer() return sqlalchemy.Integer()
class Text(ModelFieldFactory, str): class Text(ModelFieldFactory, str):
"""
Text field factory that constructs Field classes and populates their values.
"""
_type = str _type = str
def __new__( # type: ignore def __new__( # type: ignore
@ -164,10 +240,23 @@ class Text(ModelFieldFactory, str):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Text() return sqlalchemy.Text()
class Float(ModelFieldFactory, float): class Float(ModelFieldFactory, float):
"""
Float field factory that constructs Field classes and populates their values.
"""
_type = float _type = float
def __new__( # type: ignore def __new__( # type: ignore
@ -192,6 +281,15 @@ class Float(ModelFieldFactory, float):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Float() return sqlalchemy.Float()
@ -204,46 +302,115 @@ if TYPE_CHECKING: # pragma: nocover
else: else:
class Boolean(ModelFieldFactory, int): class Boolean(ModelFieldFactory, int):
"""
Boolean field factory that constructs Field classes and populates their values.
"""
_type = bool _type = bool
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Boolean() return sqlalchemy.Boolean()
class DateTime(ModelFieldFactory, datetime.datetime): class DateTime(ModelFieldFactory, datetime.datetime):
"""
DateTime field factory that constructs Field classes and populates their values.
"""
_type = datetime.datetime _type = datetime.datetime
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.DateTime() return sqlalchemy.DateTime()
class Date(ModelFieldFactory, datetime.date): class Date(ModelFieldFactory, datetime.date):
"""
Date field factory that constructs Field classes and populates their values.
"""
_type = datetime.date _type = datetime.date
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Date() return sqlalchemy.Date()
class Time(ModelFieldFactory, datetime.time): class Time(ModelFieldFactory, datetime.time):
"""
Time field factory that constructs Field classes and populates their values.
"""
_type = datetime.time _type = datetime.time
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Time() return sqlalchemy.Time()
class JSON(ModelFieldFactory, pydantic.Json): class JSON(ModelFieldFactory, pydantic.Json):
"""
JSON field factory that constructs Field classes and populates their values.
"""
_type = pydantic.Json _type = pydantic.Json
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.JSON() return sqlalchemy.JSON()
class BigInteger(Integer, int): class BigInteger(Integer, int):
"""
BigInteger field factory that constructs Field classes and populates their values.
"""
_type = int _type = int
def __new__( # type: ignore def __new__( # type: ignore
@ -274,10 +441,23 @@ class BigInteger(Integer, int):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.BigInteger() return sqlalchemy.BigInteger()
class Decimal(ModelFieldFactory, decimal.Decimal): class Decimal(ModelFieldFactory, decimal.Decimal):
"""
Decimal field factory that constructs Field classes and populates their values.
"""
_type = decimal.Decimal _type = decimal.Decimal
def __new__( # type: ignore # noqa CFQ002 def __new__( # type: ignore # noqa CFQ002
@ -317,12 +497,26 @@ class Decimal(ModelFieldFactory, decimal.Decimal):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
precision = kwargs.get("precision") precision = kwargs.get("precision")
scale = kwargs.get("scale") scale = kwargs.get("scale")
return sqlalchemy.DECIMAL(precision=precision, scale=scale) return sqlalchemy.DECIMAL(precision=precision, scale=scale)
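Illustrative usage, matching the validation rule enforced by validate() below (both parameters are required):

```python
import ormar

price = ormar.Decimal(precision=10, scale=2)   # -> sqlalchemy.DECIMAL(precision=10, scale=2)
# ormar.Decimal(precision=10)                  # would raise ModelDefinitionError (scale missing)
```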
@classmethod @classmethod
def validate(cls, **kwargs: Any) -> None: def validate(cls, **kwargs: Any) -> None:
"""
Used to validate if all required parameters on a given field type are set.
:param kwargs: all params passed during construction
:type kwargs: Any
"""
precision = kwargs.get("precision") precision = kwargs.get("precision")
scale = kwargs.get("scale") scale = kwargs.get("scale")
if precision is None or precision < 0 or scale is None or scale < 0: if precision is None or precision < 0 or scale is None or scale < 0:
@ -332,6 +526,10 @@ class Decimal(ModelFieldFactory, decimal.Decimal):
class UUID(ModelFieldFactory, uuid.UUID): class UUID(ModelFieldFactory, uuid.UUID):
"""
UUID field factory that constructs Field classes and populates their values.
"""
_type = uuid.UUID _type = uuid.UUID
def __new__( # type: ignore # noqa CFQ002 def __new__( # type: ignore # noqa CFQ002
@ -350,5 +548,14 @@ class UUID(ModelFieldFactory, uuid.UUID):
@classmethod @classmethod
def get_column_type(cls, **kwargs: Any) -> Any: def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
uuid_format = kwargs.get("uuid_format", "hex") uuid_format = kwargs.get("uuid_format", "hex")
return sqlalchemy_uuid.UUID(uuid_format=uuid_format) return sqlalchemy_uuid.UUID(uuid_format=uuid_format)
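Illustrative usage (assuming `uuid_format` accepts "hex" and "string", as the CHAR(32)/CHAR(36) note in the type decorator below suggests):

```python
import uuid
import ormar

id_field = ormar.UUID(primary_key=True, default=uuid.uuid4, uuid_format="string")
```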

View File

@ -7,10 +7,11 @@ from sqlalchemy.types import TypeDecorator
class UUID(TypeDecorator): # pragma nocover class UUID(TypeDecorator): # pragma nocover
"""Platform-independent GUID type. """
Platform-independent GUID type.
Uses CHAR(36) if in a string mode, otherwise uses CHAR(32), to store UUID. Uses CHAR(36) if in a string mode, otherwise uses CHAR(32), to store UUID.
For details of the different methods check the documentation of the parent class.
""" """
impl = CHAR impl = CHAR
@ -25,6 +26,14 @@ class UUID(TypeDecorator): # pragma nocover
return "CHAR(32)" return "CHAR(32)"
def _cast_to_uuid(self, value: Union[str, int, bytes]) -> uuid.UUID: def _cast_to_uuid(self, value: Union[str, int, bytes]) -> uuid.UUID:
"""
Parses given value into uuid.UUID field.
:param value: value to be parsed
:type value: Union[str, int, bytes]
:return: initialized uuid
:rtype: uuid.UUID
"""
if not isinstance(value, uuid.UUID): if not isinstance(value, uuid.UUID):
if isinstance(value, bytes): if isinstance(value, bytes):
ret_value = uuid.UUID(bytes=value) ret_value = uuid.UUID(bytes=value)
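A standalone sketch of the casting rule using only the standard library (a simplification, not the class method itself):

```python
import uuid


def cast_to_uuid(value):
    if isinstance(value, uuid.UUID):
        return value
    if isinstance(value, bytes):
        return uuid.UUID(bytes=value)
    if isinstance(value, int):
        return uuid.UUID(int=value)
    return uuid.UUID(value)  # str, e.g. "12345678-1234-5678-1234-567812345678"
```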

View File

@ -1,5 +1,10 @@
"""
Definition of Model, its parents NewBaseModel and mixins used by models.
Also defines a Metaclass that handles all construction and relations registration,
as well as a vast number of helper functions for pydantic, sqlalchemy and relations.
"""
from ormar.models.newbasemodel import NewBaseModel # noqa I100 from ormar.models.newbasemodel import NewBaseModel # noqa I100
from ormar.models.model import Model # noqa I100 from ormar.models.model import Model # noqa I100
from ormar.models.metaclass import expand_reverse_relationships # noqa I100
__all__ = ["NewBaseModel", "Model", "expand_reverse_relationships"] __all__ = ["NewBaseModel", "Model"]

View File

@ -1,49 +0,0 @@
from typing import Dict, Set, Union
class Excludable:
@staticmethod
def get_child(
items: Union[Set, Dict, None], key: str = None
) -> Union[Set, Dict, None]:
if isinstance(items, dict):
return items.get(key, {})
return items
@staticmethod
def get_excluded(
exclude: Union[Set, Dict, None], key: str = None
) -> Union[Set, Dict, None]:
return Excludable.get_child(items=exclude, key=key)
@staticmethod
def get_included(
include: Union[Set, Dict, None], key: str = None
) -> Union[Set, Dict, None]:
return Excludable.get_child(items=include, key=key)
@staticmethod
def is_excluded(exclude: Union[Set, Dict, None], key: str = None) -> bool:
if exclude is None:
return False
if exclude is Ellipsis: # pragma: nocover
return True
to_exclude = Excludable.get_excluded(exclude=exclude, key=key)
if isinstance(to_exclude, Set):
return key in to_exclude
if to_exclude is ...:
return True
return False
@staticmethod
def is_included(include: Union[Set, Dict, None], key: str = None) -> bool:
if include is None:
return True
if include is Ellipsis:
return True
to_include = Excludable.get_included(include=include, key=key)
if isinstance(to_include, Set):
return key in to_include
if to_include is ...:
return True
return False

View File

@ -0,0 +1,31 @@
from ormar.models.helpers.models import (
extract_annotations_and_default_vals,
populate_default_options_values,
)
from ormar.models.helpers.pydantic import (
get_potential_fields,
get_pydantic_base_orm_config,
get_pydantic_field,
)
from ormar.models.helpers.relations import (
alias_manager,
register_relation_in_alias_manager,
)
from ormar.models.helpers.relations import expand_reverse_relationships
from ormar.models.helpers.sqlalchemy import (
populate_meta_sqlalchemy_table_if_required,
populate_meta_tablename_columns_and_pk,
)
__all__ = [
"expand_reverse_relationships",
"extract_annotations_and_default_vals",
"populate_meta_tablename_columns_and_pk",
"populate_meta_sqlalchemy_table_if_required",
"populate_default_options_values",
"alias_manager",
"register_relation_in_alias_manager",
"get_pydantic_field",
"get_potential_fields",
"get_pydantic_base_orm_config",
]

View File

@ -0,0 +1,82 @@
from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Type
import ormar
from ormar.fields.foreign_key import ForeignKeyField
from ormar.models.helpers.pydantic import populate_pydantic_default_values
if TYPE_CHECKING: # pragma no cover
from ormar import Model
def populate_default_options_values(
new_model: Type["Model"], model_fields: Dict
) -> None:
"""
Sets all optional Meta values to their defaults
and sets model_fields that were already extracted previously.
All options that are not overwritten/set for all models should live here.
Current options are:
* constraints = []
* abstract = False
:param new_model: newly constructed Model
:type new_model: Model class
:param model_fields:
:type model_fields: Union[Dict[str, type], Dict]
"""
if not hasattr(new_model.Meta, "constraints"):
new_model.Meta.constraints = []
if not hasattr(new_model.Meta, "model_fields"):
new_model.Meta.model_fields = model_fields
if not hasattr(new_model.Meta, "abstract"):
new_model.Meta.abstract = False
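Illustrative effect of the defaults (Note is a hypothetical model; `database`/`metadata` as in the earlier sketches):

```python
class Note(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)


assert Note.Meta.constraints == []     # default filled in
assert Note.Meta.abstract is False     # default filled in
```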
def extract_annotations_and_default_vals(attrs: Dict) -> Tuple[Dict, Dict]:
"""
Extracts annotations from class namespace dict and triggers
extraction of ormar model_fields.
:param attrs: namespace of the class created
:type attrs: Dict
:return: namespace of the class updated, dict of extracted model_fields
:rtype: Tuple[Dict, Dict]
"""
key = "__annotations__"
attrs[key] = attrs.get(key, {})
attrs, model_fields = populate_pydantic_default_values(attrs)
return attrs, model_fields
# cannot be in relations helpers due to cyclical import
def validate_related_names_in_relations(
model_fields: Dict, new_model: Type["Model"]
) -> None:
"""
Performs a validation of relation_names in relation fields.
If multiple fields are leading to the same related model
only one can have empty related_name param
(populated by default as model.name.lower()+'s').
Also related_names have to be unique for given related model.
:raises ModelDefinitionError: if validation of related_names fail
:param model_fields: dictionary of declared ormar model fields
:type model_fields: Dict[str, ormar.Field]
:param new_model:
:type new_model: Model class
"""
already_registered: Dict[str, List[Optional[str]]] = dict()
for field in model_fields.values():
if issubclass(field, ForeignKeyField):
previous_related_names = already_registered.setdefault(field.to, [])
if field.related_name in previous_related_names:
raise ormar.ModelDefinitionError(
f"Multiple fields declared on {new_model.get_name(lower=False)} "
f"model leading to {field.to.get_name(lower=False)} model without "
f"related_name property set. \nThere can be only one relation with "
f"default/empty name: '{new_model.get_name() + 's'}'"
f"\nTip: provide different related_name for FK and/or M2M fields"
)
previous_related_names.append(field.related_name)
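A hypothetical failing declaration (assuming a Team model declared like the earlier sketches): two FK fields lead to the same model, so at least one has to set a distinct related_name.

```python
class Match(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    home_team: Team = ormar.ForeignKey(Team)   # uses the default related_name
    away_team: Team = ormar.ForeignKey(Team)   # clash -> ModelDefinitionError
    # fix: away_team = ormar.ForeignKey(Team, related_name="away_matches")
```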

View File

@ -0,0 +1,158 @@
import warnings
from typing import Dict, Optional, TYPE_CHECKING, Tuple, Type
from pydantic import BaseConfig
from pydantic.fields import ModelField
from pydantic.utils import lenient_issubclass
import ormar # noqa: I100, I202
from ormar.fields import BaseField, ManyToManyField
if TYPE_CHECKING: # pragma no cover
from ormar import Model
def create_pydantic_field(
field_name: str, model: Type["Model"], model_field: Type[ManyToManyField]
) -> None:
"""
Registers a pydantic field on the through model that leads to the passed model
and is registered under the passed field_name.
The through model is fetched from the through attribute of the passed model_field.
:param field_name: field name to register
:type field_name: str
:param model: type of field to register
:type model: Model class
:param model_field: relation field from which through model is extracted
:type model_field: ManyToManyField class
"""
model_field.through.__fields__[field_name] = ModelField(
name=field_name,
type_=model,
model_config=model.__config__,
required=False,
class_validators={},
)
def get_pydantic_field(field_name: str, model: Type["Model"]) -> "ModelField":
"""
Extracts field type and if it's required from Model model_fields by passed
field_name. Returns a pydantic field with type of field_name field type.
:param field_name: field name to fetch from Model and name of pydantic field
:type field_name: str
:param model: type of field to register
:type model: Model class
:return: newly created pydantic field
:rtype: pydantic.ModelField
"""
return ModelField(
name=field_name,
type_=model.Meta.model_fields[field_name].__type__, # type: ignore
model_config=model.__config__,
required=not model.Meta.model_fields[field_name].nullable,
class_validators={},
)
def populate_default_pydantic_field_value(
ormar_field: Type[BaseField], field_name: str, attrs: dict
) -> dict:
"""
Grabs current value of the ormar Field in class namespace
(so the default_value declared on ormar model if set)
and converts it to pydantic.FieldInfo
that pydantic is able to extract later.
FieldInfo stores all needed params like max_length of the string
and other constraints that pydantic can use to build
its own field validation used by ormar.
:param ormar_field: field to convert
:type ormar_field: ormar Field
:param field_name: field to convert name
:type field_name: str
:param attrs: current class namespace
:type attrs: Dict
:return: updated namespace dict
:rtype: Dict
"""
curr_def_value = attrs.get(field_name, ormar.Undefined)
if lenient_issubclass(curr_def_value, ormar.fields.BaseField):
curr_def_value = ormar.Undefined
if curr_def_value is None:
attrs[field_name] = ormar_field.convert_to_pydantic_field_info(allow_null=True)
else:
attrs[field_name] = ormar_field.convert_to_pydantic_field_info()
return attrs
def populate_pydantic_default_values(attrs: Dict) -> Tuple[Dict, Dict]:
"""
Extracts ormar fields from annotations (deprecated) and from namespace
dictionary of the class. Fields declared on model are all subclasses of the
BaseField class.
Triggers conversion of an ormar field into pydantic FieldInfo, which has all needed
parameters saved.
Overwrites the annotations of ormar fields to the corresponding types declared on
ormar fields (constructed dynamically for relations).
Those annotations are later used by pydantic to construct its own fields.
:param attrs: current class namespace
:type attrs: Dict
:return: namespace of the class updated, dict of extracted model_fields
:rtype: Tuple[Dict, Dict]
"""
model_fields = {}
potential_fields = {
k: v
for k, v in attrs["__annotations__"].items()
if lenient_issubclass(v, BaseField)
}
if potential_fields:
warnings.warn(
"Using ormar.Fields as type Model annotation has been deprecated,"
" check documentation of current version",
DeprecationWarning,
)
potential_fields.update(get_potential_fields(attrs))
for field_name, field in potential_fields.items():
field.name = field_name
attrs = populate_default_pydantic_field_value(field, field_name, attrs)
model_fields[field_name] = field
attrs["__annotations__"][field_name] = (
field.__type__ if not field.nullable else Optional[field.__type__]
)
return attrs, model_fields
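In other words, a sketch of the two declaration styles this function handles (the deprecated one triggers the warning above; Movie is hypothetical):

```python
class Movie(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    # deprecated: the ormar Field used as the annotation itself
    # title: ormar.String(max_length=100)

    # current: plain python type annotation, ormar Field as the assigned value
    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=100)
```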
def get_pydantic_base_orm_config() -> Type[BaseConfig]:
"""
Returns empty pydantic Config with orm_mode set to True.
:return: empty default config with orm_mode set.
:rtype: pydantic Config
"""
class Config(BaseConfig):
orm_mode = True
return Config
def get_potential_fields(attrs: Dict) -> Dict:
"""
Gets all the fields in current class namespace that are Fields.
:param attrs: current class namespace
:type attrs: Dict
:return: extracted fields that are ormar Fields
:rtype: Dict
"""
return {k: v for k, v in attrs.items() if lenient_issubclass(v, BaseField)}

View File

@ -0,0 +1,217 @@
from typing import TYPE_CHECKING, Type
import ormar
from ormar import ForeignKey, ManyToMany
from ormar.fields import ManyToManyField
from ormar.fields.foreign_key import ForeignKeyField
from ormar.models.helpers.sqlalchemy import adjust_through_many_to_many_model
from ormar.relations import AliasManager
if TYPE_CHECKING: # pragma no cover
from ormar import Model
alias_manager = AliasManager()
def register_relation_on_build(new_model: Type["Model"], field_name: str) -> None:
"""
Registers ForeignKey relation in alias_manager to set a table_prefix.
Registration also includes the reverse relation side to be able to join both sides.
The relation is registered by model name and relation field name to allow for multiple
relations between two Models that need to have different
aliases for proper sql joins.
:param new_model: constructed model
:type new_model: Model class
:param field_name: name of the related field
:type field_name: str
"""
alias_manager.add_relation_type(new_model, field_name)
def register_many_to_many_relation_on_build(
new_model: Type["Model"], field: Type[ManyToManyField], field_name: str
) -> None:
"""
Registers connection between through model and both sides of the m2m relation.
Registration also includes the reverse relation side to be able to join both sides.
The relation is registered by model name and relation field name to allow for multiple
relations between two Models that need to have different
aliases for proper sql joins.
By default the relation name is model.name.lower().
:param field_name: name of the relation key
:type field_name: str
:param new_model: model on which m2m field is declared
:type new_model: Model class
:param field: relation field
:type field: ManyToManyField class
"""
alias_manager.add_relation_type(
field.through, new_model.get_name(), is_multi=True, reverse_name=field_name
)
alias_manager.add_relation_type(
field.through,
field.to.get_name(),
is_multi=True,
reverse_name=field.related_name or new_model.get_name() + "s",
)
def expand_reverse_relationships(model: Type["Model"]) -> None:
"""
Iterates through model_fields of the given model and verifies if all reverse
relations have been populated on related models.
If the reverse relation has not been set before it's set here.
:param model: model on which relation should be checked and registered
:type model: Model class
"""
for model_field in model.Meta.model_fields.values():
if issubclass(model_field, ForeignKeyField):
child_model_name = model_field.related_name or model.get_name() + "s"
parent_model = model_field.to
child = model
if reverse_field_not_already_registered(
child, child_model_name, parent_model
):
register_reverse_model_fields(
parent_model, child, child_model_name, model_field
)
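Usage sketch of the auto-created reverse side (assuming the Author/Book models from the ForeignKey example earlier; run inside an event loop):

```python
import asyncio


async def show_reverse_relation() -> None:
    # Book.author was declared explicitly; Author.books is the reverse
    # relation registered by expand_reverse_relationships.
    author = await Author.objects.select_related("books").get(id=1)
    for book in author.books:
        print(book.title)


# asyncio.run(show_reverse_relation())
```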
def register_reverse_model_fields(
model: Type["Model"],
child: Type["Model"],
related_name: str,
model_field: Type["ForeignKeyField"],
) -> None:
"""
Registers reverse ForeignKey field on related model.
By default it's registered as name.lower()+'s' of the model on which the relation is defined.
But if a related_name is provided it's registered under that name.
Autogenerated reverse fields also set related_name to the original field name.
:param model: related model on which reverse field should be defined
:type model: Model class
:param child: parent model with relation definition
:type child: Model class
:param related_name: name by which reverse key should be registered
:type related_name: str
:param model_field: original relation ForeignKey field
:type model_field: relation Field
"""
if issubclass(model_field, ManyToManyField):
model.Meta.model_fields[related_name] = ManyToMany(
child,
through=model_field.through,
name=related_name,
virtual=True,
related_name=model_field.name,
)
# register foreign keys on through model
adjust_through_many_to_many_model(model, child, model_field)
else:
model.Meta.model_fields[related_name] = ForeignKey(
child, real_name=related_name, virtual=True, related_name=model_field.name,
)
def register_relation_in_alias_manager(
new_model: Type["Model"], field: Type[ForeignKeyField], field_name: str
) -> None:
"""
Registers the relation (and reverse relation) in alias manager.
The m2m relations require registration of through model between
actual end models of the relation.
Delegates the actual registration to:
m2m - register_many_to_many_relation_on_build
fk - register_relation_on_build
:param new_model: model on which relation field is declared
:type new_model: Model class
:param field: relation field
:type field: ForeignKey or ManyToManyField class
:param field_name: name of the relation key
:type field_name: str
"""
if issubclass(field, ManyToManyField):
register_many_to_many_relation_on_build(
new_model=new_model, field=field, field_name=field_name
)
elif issubclass(field, ForeignKeyField):
register_relation_on_build(new_model=new_model, field_name=field_name)
def verify_related_name_dont_duplicate(
child: Type["Model"], parent_model: Type["Model"], related_name: str,
) -> None:
"""
Verifies whether the used related_name (regardless of whether it is user defined or
auto generated) is already used on the related model, but is connected with a model
other than the one that we connect right now.
:raises ModelDefinitionError: if the name is already used but leads to a different
related model
:param child: related Model class
:type child: ormar.models.metaclass.ModelMetaclass
:param parent_model: parent Model class
:type parent_model: ormar.models.metaclass.ModelMetaclass
:param related_name:
:type related_name:
:return: None
:rtype: None
"""
if parent_model.Meta.model_fields.get(related_name):
fk_field = parent_model.Meta.model_fields.get(related_name)
if not fk_field: # pragma: no cover
return
if fk_field.to != child and fk_field.to.Meta != child.Meta:
raise ormar.ModelDefinitionError(
f"Relation with related_name "
f"'{related_name}' "
f"leading to model "
f"{parent_model.get_name(lower=False)} "
f"cannot be used on model "
f"{child.get_name(lower=False)} "
f"because it's already used by model "
f"{fk_field.to.get_name(lower=False)}"
)
def reverse_field_not_already_registered(
child: Type["Model"], child_model_name: str, parent_model: Type["Model"]
) -> bool:
"""
Checks if the child is already registered in the parent's pydantic fields.
:raises ModelDefinitionError: if the related name is already used but leads to a
different related model
:param child: related Model class
:type child: ormar.models.metaclass.ModelMetaclass
:param child_model_name: related_name of the child if provided
:type child_model_name: str
:param parent_model: parent Model class
:type parent_model: ormar.models.metaclass.ModelMetaclass
:return: result of the check
:rtype: bool
"""
check_result = child_model_name not in parent_model.Meta.model_fields
check_result2 = child.get_name() not in parent_model.Meta.model_fields
if not check_result:
verify_related_name_dont_duplicate(
child=child, parent_model=parent_model, related_name=child_model_name
)
if not check_result2:
verify_related_name_dont_duplicate(
child=child, parent_model=parent_model, related_name=child.get_name()
)
return check_result and check_result2

View File

@ -0,0 +1,213 @@
import copy
import logging
from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Type
import sqlalchemy
from ormar import ForeignKey, Integer, ModelDefinitionError # noqa: I202
from ormar.fields import BaseField, ManyToManyField
from ormar.models.helpers.models import validate_related_names_in_relations
from ormar.models.helpers.pydantic import create_pydantic_field
if TYPE_CHECKING: # pragma no cover
from ormar import Model, ModelMeta
def adjust_through_many_to_many_model(
model: Type["Model"], child: Type["Model"], model_field: Type[ManyToManyField]
) -> None:
"""
Registers m2m relation on through model.
Sets ormar.ForeignKey from through model to both child and parent models.
Sets sqlalchemy.ForeignKey to both child and parent models.
Sets pydantic fields with child and parent model types.
:param model: model on which relation is declared
:type model: Model class
:param child: model to which m2m relation leads
:type child: Model class
:param model_field: relation field defined in parent model
:type model_field: ManyToManyField
"""
model_field.through.Meta.model_fields[model.get_name()] = ForeignKey(
model, real_name=model.get_name(), ondelete="CASCADE"
)
model_field.through.Meta.model_fields[child.get_name()] = ForeignKey(
child, real_name=child.get_name(), ondelete="CASCADE"
)
create_and_append_m2m_fk(model, model_field)
create_and_append_m2m_fk(child, model_field)
create_pydantic_field(model.get_name(), model, model_field)
create_pydantic_field(child.get_name(), child, model_field)
def create_and_append_m2m_fk(
model: Type["Model"], model_field: Type[ManyToManyField]
) -> None:
"""
Registers a sqlalchemy Column with a sqlalchemy.ForeignKey leading to the model.
The newly created field is added to the m2m relation's through model Meta columns and table.
:param model: Model class to which FK should be created
:type model: Model class
:param model_field: field with ManyToMany relation
:type model_field: ManyToManyField field
"""
pk_alias = model.get_column_alias(model.Meta.pkname)
pk_column = next((col for col in model.Meta.columns if col.name == pk_alias), None)
if pk_column is None: # pragma: no cover
raise ModelDefinitionError(
"ManyToMany relation cannot lead to field without pk"
)
column = sqlalchemy.Column(
model.get_name(),
pk_column.type,
sqlalchemy.schema.ForeignKey(
model.Meta.tablename + "." + pk_alias,
ondelete="CASCADE",
onupdate="CASCADE",
),
)
model_field.through.Meta.columns.append(column)
# breakpoint()
model_field.through.Meta.table.append_column(copy.deepcopy(column))
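Roughly, for the Post/Category sketch earlier, the through table ends up looking like this (a hand-written equivalent for illustration, not output of this code):

```python
import sqlalchemy

posts_categories = sqlalchemy.Table(
    "posts_categories",
    sqlalchemy.MetaData(),
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column(
        "post",
        sqlalchemy.Integer,
        sqlalchemy.schema.ForeignKey("posts.id", ondelete="CASCADE", onupdate="CASCADE"),
    ),
    sqlalchemy.Column(
        "category",
        sqlalchemy.Integer,
        sqlalchemy.schema.ForeignKey("categories.id", ondelete="CASCADE", onupdate="CASCADE"),
    ),
)
```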
def check_pk_column_validity(
field_name: str, field: BaseField, pkname: Optional[str]
) -> Optional[str]:
"""
Receives the field marked as primary key and verifies if the pkname
was not already set (only one allowed per model) and if field is not marked
as pydantic_only as it needs to be a database field.
:raises ModelDefinitionError: if pkname is already set or the field is pydantic_only
:param field_name: name of field
:type field_name: str
:param field: ormar.Field
:type field: BaseField
:param pkname: already set pkname
:type pkname: Optional[str]
:return: name of the field that should be set as pkname
:rtype: str
"""
if pkname is not None:
raise ModelDefinitionError("Only one primary key column is allowed.")
if field.pydantic_only:
raise ModelDefinitionError("Primary key column cannot be pydantic only")
return field_name
def sqlalchemy_columns_from_model_fields(
model_fields: Dict, new_model: Type["Model"]
) -> Tuple[Optional[str], List[sqlalchemy.Column]]:
"""
Iterates over declared on Model model fields and extracts fields that
should be treated as database fields.
If the model is empty it sets mandatory id field as primary key
(used in through models in m2m relations).
Triggers a validation of relation_names in relation fields. If multiple fields
are leading to the same related model only one can have empty related_name param.
Also related_names have to be unique.
Triggers validation of primary_key - only one pk can be set, it is required
and it cannot be pydantic_only.
Appends a field to columns if it's not pydantic_only,
a virtual ForeignKey or a ManyToMany field.
:raises ModelDefinitionError: if validation of related_names fail,
or pkname validation fails.
:param model_fields: dictionary of declared ormar model fields
:type model_fields: Dict[str, ormar.Field]
:param new_model:
:type new_model: Model class
:return: pkname, list of sqlalchemy columns
:rtype: Tuple[Optional[str], List[sqlalchemy.Column]]
"""
if len(model_fields.keys()) == 0:
model_fields["id"] = Integer(name="id", primary_key=True)
logging.warning(
"Table {table_name} had no fields so auto "
"Integer primary key named `id` created."
)
validate_related_names_in_relations(model_fields, new_model)
columns = []
pkname = None
for field_name, field in model_fields.items():
if field.primary_key:
pkname = check_pk_column_validity(field_name, field, pkname)
if (
not field.pydantic_only
and not field.virtual
and not issubclass(field, ManyToManyField)
):
columns.append(field.get_column(field.get_alias()))
return pkname, columns
def populate_meta_tablename_columns_and_pk(
name: str, new_model: Type["Model"]
) -> Type["Model"]:
"""
Sets Model tablename if it's not already set in Meta.
The default tablename, if not present, is the class name lowercased + 's' (i.e. Bed becomes beds).
Checks if Model's Meta has pkname and columns set.
If not, calls sqlalchemy_columns_from_model_fields to populate
columns from ormar.fields definitions.
:raises ModelDefinitionError: if pkname is not present raises ModelDefinitionError.
Each model has to have pk.
:param name: name of the current Model
:type name: str
:param new_model: currently constructed Model
:type new_model: ormar.models.metaclass.ModelMetaclass
:return: Model with populated pkname and columns in Meta
:rtype: ormar.models.metaclass.ModelMetaclass
"""
tablename = name.lower() + "s"
new_model.Meta.tablename = (
new_model.Meta.tablename if hasattr(new_model.Meta, "tablename") else tablename
)
pkname: Optional[str]
if hasattr(new_model.Meta, "columns"):
columns = new_model.Meta.columns
pkname = new_model.Meta.pkname
else:
pkname, columns = sqlalchemy_columns_from_model_fields(
new_model.Meta.model_fields, new_model
)
if pkname is None:
raise ModelDefinitionError("Table has to have a primary key.")
new_model.Meta.columns = columns
new_model.Meta.pkname = pkname
return new_model
def populate_meta_sqlalchemy_table_if_required(meta: "ModelMeta") -> None:
"""
Constructs sqlalchemy table out of columns and parameters set on Meta class.
It populates name, metadata, columns and constraints.
:param meta: Meta class of the Model without sqlalchemy table constructed
:type meta: Model class Meta
:return: class with populated Meta.table
:rtype: Model class
"""
if not hasattr(meta, "table"):
meta.table = sqlalchemy.Table(
meta.tablename,
meta.metadata,
*[copy.deepcopy(col) for col in meta.columns],
*meta.constraints,
)

View File

@ -1,20 +1,38 @@
import logging from typing import (
import warnings Any,
from typing import Any, Dict, List, Optional, Set, TYPE_CHECKING, Tuple, Type, Union Dict,
List,
Optional,
Set,
TYPE_CHECKING,
Tuple,
Type,
Union,
cast,
)
import databases import databases
import pydantic import pydantic
import sqlalchemy import sqlalchemy
from pydantic import BaseConfig
from pydantic.fields import ModelField
from pydantic.utils import lenient_issubclass
from sqlalchemy.sql.schema import ColumnCollectionConstraint from sqlalchemy.sql.schema import ColumnCollectionConstraint
import ormar # noqa I100 import ormar # noqa I100
from ormar import ForeignKey, Integer, ModelDefinitionError # noqa I100 from ormar import ForeignKey, Integer, ModelDefinitionError # noqa I100
from ormar.fields import BaseField from ormar.fields import BaseField
from ormar.fields.foreign_key import ForeignKeyField from ormar.fields.foreign_key import ForeignKeyField
from ormar.fields.many_to_many import ManyToMany, ManyToManyField from ormar.fields.many_to_many import ManyToManyField
from ormar.models.helpers import (
alias_manager,
expand_reverse_relationships,
extract_annotations_and_default_vals,
get_potential_fields,
get_pydantic_base_orm_config,
get_pydantic_field,
populate_default_options_values,
populate_meta_sqlalchemy_table_if_required,
populate_meta_tablename_columns_and_pk,
register_relation_in_alias_manager,
)
from ormar.models.quick_access_views import quick_access_set from ormar.models.quick_access_views import quick_access_set
from ormar.queryset import QuerySet from ormar.queryset import QuerySet
from ormar.relations.alias_manager import AliasManager from ormar.relations.alias_manager import AliasManager
@ -23,10 +41,17 @@ from ormar.signals import Signal, SignalEmitter
if TYPE_CHECKING: # pragma no cover if TYPE_CHECKING: # pragma no cover
from ormar import Model from ormar import Model
alias_manager = AliasManager() PARSED_FIELDS_KEY = "__parsed_fields__"
CONFIG_KEY = "Config"
class ModelMeta: class ModelMeta:
"""
Class used for type hinting.
Users can subclass this one for convenience but it's not required.
The only requirement is that ormar.Model has to have inner class with name Meta.
"""
tablename: str tablename: str
table: sqlalchemy.Table table: sqlalchemy.Table
metadata: sqlalchemy.MetaData metadata: sqlalchemy.MetaData
@ -40,294 +65,35 @@ class ModelMeta:
alias_manager: AliasManager alias_manager: AliasManager
property_fields: Set property_fields: Set
signals: SignalEmitter signals: SignalEmitter
abstract: bool
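A small sketch of the type-hinting convenience mentioned in the docstring (optional; Course and the Meta contents are illustrative, `database`/`metadata` assumed as before):

```python
import ormar
from ormar import ModelMeta


class Course(ormar.Model):
    class Meta(ModelMeta):
        tablename = "courses"
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
```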
def register_relation_on_build_new(new_model: Type["Model"], field_name: str) -> None:
alias_manager.add_relation_type_new(new_model, field_name)
def register_many_to_many_relation_on_build_new(
new_model: Type["Model"], field: Type[ManyToManyField]
) -> None:
alias_manager.add_relation_type_new(
field.through, new_model.get_name(), is_multi=True
)
alias_manager.add_relation_type_new(
field.through, field.to.get_name(), is_multi=True
)
def reverse_field_not_already_registered(
child: Type["Model"], child_model_name: str, parent_model: Type["Model"]
) -> bool:
return (
child_model_name not in parent_model.__fields__
and child.get_name() not in parent_model.__fields__
)
def expand_reverse_relationships(model: Type["Model"]) -> None:
for model_field in model.Meta.model_fields.values():
if issubclass(model_field, ForeignKeyField):
child_model_name = model_field.related_name or model.get_name() + "s"
parent_model = model_field.to
child = model
if reverse_field_not_already_registered(
child, child_model_name, parent_model
):
register_reverse_model_fields(
parent_model, child, child_model_name, model_field
)
def register_reverse_model_fields(
model: Type["Model"],
child: Type["Model"],
child_model_name: str,
model_field: Type["ForeignKeyField"],
) -> None:
if issubclass(model_field, ManyToManyField):
model.Meta.model_fields[child_model_name] = ManyToMany(
child,
through=model_field.through,
name=child_model_name,
virtual=True,
related_name=model_field.name,
)
# register foreign keys on through model
adjust_through_many_to_many_model(model, child, model_field, child_model_name)
else:
model.Meta.model_fields[child_model_name] = ForeignKey(
child,
real_name=child_model_name,
virtual=True,
related_name=model_field.name,
)
def adjust_through_many_to_many_model(
model: Type["Model"],
child: Type["Model"],
model_field: Type[ManyToManyField],
child_model_name: str,
) -> None:
model_field.through.Meta.model_fields[model.get_name()] = ForeignKey(
model, real_name=model.get_name(), ondelete="CASCADE"
)
model_field.through.Meta.model_fields[child.get_name()] = ForeignKey(
child, real_name=child.get_name(), ondelete="CASCADE"
)
create_and_append_m2m_fk(model, model_field)
create_and_append_m2m_fk(child, model_field)
create_pydantic_field(model.get_name(), model, model_field)
create_pydantic_field(child.get_name(), child, model_field)
def create_pydantic_field(
field_name: str, model: Type["Model"], model_field: Type[ManyToManyField]
) -> None:
model_field.through.__fields__[field_name] = ModelField(
name=field_name,
type_=model,
model_config=model.__config__,
required=False,
class_validators={},
)
def get_pydantic_field(field_name: str, model: Type["Model"]) -> "ModelField":
return ModelField(
name=field_name,
type_=model.Meta.model_fields[field_name].__type__, # type: ignore
model_config=model.__config__,
required=not model.Meta.model_fields[field_name].nullable,
class_validators={},
)
def create_and_append_m2m_fk(
model: Type["Model"], model_field: Type[ManyToManyField]
) -> None:
column = sqlalchemy.Column(
model.get_name(),
model.Meta.table.columns.get(model.get_column_alias(model.Meta.pkname)).type,
sqlalchemy.schema.ForeignKey(
model.Meta.tablename + "." + model.get_column_alias(model.Meta.pkname),
ondelete="CASCADE",
onupdate="CASCADE",
),
)
model_field.through.Meta.columns.append(column)
model_field.through.Meta.table.append_column(column)
def check_pk_column_validity(
field_name: str, field: BaseField, pkname: Optional[str]
) -> Optional[str]:
if pkname is not None:
raise ModelDefinitionError("Only one primary key column is allowed.")
if field.pydantic_only:
raise ModelDefinitionError("Primary key column cannot be pydantic only")
return field_name
def validate_related_names_in_relations(
model_fields: Dict, new_model: Type["Model"]
) -> None:
already_registered: Dict[str, List[Optional[str]]] = dict()
for field in model_fields.values():
if issubclass(field, ForeignKeyField):
previous_related_names = already_registered.setdefault(field.to, [])
if field.related_name in previous_related_names:
raise ModelDefinitionError(
f"Multiple fields declared on {new_model.get_name(lower=False)} "
f"model leading to {field.to.get_name(lower=False)} model without "
f"related_name property set. \nThere can be only one relation with "
f"default/empty name: '{new_model.get_name() + 's'}'"
f"\nTip: provide different related_name for FK and/or M2M fields"
)
else:
previous_related_names.append(field.related_name)
def sqlalchemy_columns_from_model_fields(
model_fields: Dict, table_name: str, new_model: Type["Model"]
) -> Tuple[Optional[str], List[sqlalchemy.Column]]:
columns = []
pkname = None
if len(model_fields.keys()) == 0:
model_fields["id"] = Integer(name="id", primary_key=True)
logging.warning(
"Table {table_name} had no fields so auto "
"Integer primary key named `id` created."
)
validate_related_names_in_relations(model_fields, new_model)
for field_name, field in model_fields.items():
if field.primary_key:
pkname = check_pk_column_validity(field_name, field, pkname)
if (
not field.pydantic_only
and not field.virtual
and not issubclass(field, ManyToManyField)
):
columns.append(field.get_column(field.get_alias()))
return pkname, columns
def register_relation_in_alias_manager_new(
new_model: Type["Model"], field: Type[ForeignKeyField], field_name: str
) -> None:
if issubclass(field, ManyToManyField):
register_many_to_many_relation_on_build_new(new_model=new_model, field=field)
elif issubclass(field, ForeignKeyField):
register_relation_on_build_new(new_model=new_model, field_name=field_name)
def populate_default_pydantic_field_value(
ormar_field: Type[BaseField], field_name: str, attrs: dict
) -> dict:
curr_def_value = attrs.get(field_name, ormar.Undefined)
if lenient_issubclass(curr_def_value, ormar.fields.BaseField):
curr_def_value = ormar.Undefined
if curr_def_value is None:
attrs[field_name] = ormar_field.convert_to_pydantic_field_info(allow_null=True)
else:
attrs[field_name] = ormar_field.convert_to_pydantic_field_info()
return attrs
def populate_pydantic_default_values(attrs: Dict) -> Tuple[Dict, Dict]:
model_fields = {}
potential_fields = {
k: v
for k, v in attrs["__annotations__"].items()
if lenient_issubclass(v, BaseField)
}
if potential_fields:
warnings.warn(
"Using ormar.Fields as type Model annotation has been deprecated,"
" check documentation of current version",
DeprecationWarning,
)
potential_fields.update(
{k: v for k, v in attrs.items() if lenient_issubclass(v, BaseField)}
)
for field_name, field in potential_fields.items():
field.name = field_name
attrs = populate_default_pydantic_field_value(field, field_name, attrs)
model_fields[field_name] = field
attrs["__annotations__"][field_name] = (
field.__type__ if not field.nullable else Optional[field.__type__]
)
return attrs, model_fields
def extract_annotations_and_default_vals(attrs: dict) -> Tuple[Dict, Dict]:
key = "__annotations__"
attrs[key] = attrs.get(key, {})
attrs, model_fields = populate_pydantic_default_values(attrs)
return attrs, model_fields
def populate_meta_tablename_columns_and_pk(
name: str, new_model: Type["Model"]
) -> Type["Model"]:
tablename = name.lower() + "s"
new_model.Meta.tablename = (
new_model.Meta.tablename if hasattr(new_model.Meta, "tablename") else tablename
)
pkname: Optional[str]
if hasattr(new_model.Meta, "columns"):
columns = new_model.Meta.table.columns
pkname = new_model.Meta.pkname
else:
pkname, columns = sqlalchemy_columns_from_model_fields(
new_model.Meta.model_fields, new_model.Meta.tablename, new_model
)
if pkname is None:
raise ModelDefinitionError("Table has to have a primary key.")
new_model.Meta.columns = columns
new_model.Meta.pkname = pkname
return new_model
def populate_meta_sqlalchemy_table_if_required(
new_model: Type["Model"],
) -> Type["Model"]:
if not hasattr(new_model.Meta, "table"):
new_model.Meta.table = sqlalchemy.Table(
new_model.Meta.tablename,
new_model.Meta.metadata,
*new_model.Meta.columns,
*new_model.Meta.constraints,
)
return new_model
def get_pydantic_base_orm_config() -> Type[BaseConfig]:
class Config(BaseConfig):
orm_mode = True
# arbitrary_types_allowed = True
return Config
def check_if_field_has_choices(field: Type[BaseField]) -> bool:
"""
Checks if given field has choices populated.
If it has one, a validator for this field needs to be attached.
:param field: ormar field to check
:type field: BaseField
:return: result of the check
:rtype: bool
"""
return hasattr(field, "choices") and bool(field.choices) return hasattr(field, "choices") and bool(field.choices)
def model_initialized_and_has_model_fields(model: Type["Model"]) -> bool:
return hasattr(model, "Meta") and hasattr(model.Meta, "model_fields")
def choices_validator(cls: Type["Model"], values: Dict[str, Any]) -> Dict[str, Any]:
"""
Validator that is attached to pydantic model pre root validators.
Validator checks if field value is in field.choices list.
:raises ValueError: if field value is outside of allowed choices.
:param cls: constructed class
:type cls: Model class
:param values: dictionary of field values (pydantic side)
:type values: Dict[str, Any]
:return: values if pass validation, otherwise exception is raised
:rtype: Dict[str, Any]
"""
for field_name, field in cls.Meta.model_fields.items():
if check_if_field_has_choices(field):
value = values.get(field_name, ormar.Undefined)
@ -341,7 +107,14 @@ def choices_validator(cls: Type["Model"], values: Dict[str, Any]) -> Dict[str, A
def populate_choices_validators(model: Type["Model"]) -> None:  # noqa CCR001
"""
Checks if Model has any fields with choices set.
If yes it adds choices validation into pre root validators.
:param model: newly constructed Model
:type model: Model class
"""
if not meta_field_not_set(model=model, field_name="model_fields"):
for _, field in model.Meta.model_fields.items():
if check_if_field_has_choices(field):
validators = getattr(model, "__pre_root_validators__", [])
@ -350,38 +123,79 @@ def populate_choices_validators(model: Type["Model"]) -> None: # noqa CCR001
model.__pre_root_validators__ = validators
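For illustration, a minimal sketch of the choices flow described above; the Order model and its values are made up, and the standard database/metadata setup is assumed.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Order(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    # choices makes populate_choices_validators attach a pre root validator
    status: str = ormar.String(max_length=20, choices=["new", "paid", "shipped"])


Order(status="paid")  # passes the attached choices validator
# Order(status="unknown")  # would raise ValueError - value not in allowed choices
```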
def populate_default_options_values(
new_model: Type["Model"], model_fields: Dict
) -> None:
if not hasattr(new_model.Meta, "constraints"):
new_model.Meta.constraints = []
if not hasattr(new_model.Meta, "model_fields"):
new_model.Meta.model_fields = model_fields
def add_cached_properties(new_model: Type["Model"]) -> None:
"""
Sets cached properties for both pydantic and ormar models.
Quick access fields are fields grabbed in getattribute to skip all checks.
Related fields and names are populated to None as they can change later.
When children models are constructed they can modify parent to register itself.
All properties here are used as "cache" to not recalculate them constantly.
:param new_model: newly constructed Model
:type new_model: Model class
"""
new_model._quick_access_fields = quick_access_set
new_model._related_names = None
new_model._related_fields = None
new_model._pydantic_fields = {name for name in new_model.__fields__}
def meta_field_not_set(model: Type["Model"], field_name: str) -> bool:
"""
Checks if field with given name is already present in model.Meta.
Then checks if it's set to something truthy
(in practice meaning not None, as it's either None or an ormar Field).
:param model: newly constructed model
:type model: Model class
:param field_name: name of the ormar field
:type field_name: str
:return: result of the check
:rtype: bool
"""
return not hasattr(model.Meta, field_name) or not getattr(model.Meta, field_name)
def add_property_fields(new_model: Type["Model"], attrs: Dict) -> None:  # noqa: CCR001
"""
Checks class namespace for properties or functions with __property_field__.
If an attribute has __property_field__ it was decorated with @property_field.
Functions like this are exposed in dict() (therefore also fastapi result).
Names of property fields are cached for quicker access / extraction.
:param new_model: newly constructed model
:type new_model: Model class
:param attrs: class namespace of the model being constructed
:type attrs: Dict
"""
props = set()
for var_name, value in attrs.items():
if isinstance(value, property):
value = value.fget
field_config = getattr(value, "__property_field__", None)
if field_config:
props.add(var_name)
if meta_field_not_set(model=new_model, field_name="property_fields"): if meta_field_not_set(model=new_model, field_name="property_fields"):
props = set()
for var_name, value in attrs.items():
if isinstance(value, property):
value = value.fget
field_config = getattr(value, "__property_field__", None)
if field_config:
props.add(var_name)
new_model.Meta.property_fields = props new_model.Meta.property_fields = props
else:
new_model.Meta.property_fields = new_model.Meta.property_fields.union(props)
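A sketch of the @property_field hook that add_property_fields collects; it assumes the decorator is exposed as ormar.property_field, and the Employee model below is invented.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Employee(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    first_name: str = ormar.String(max_length=50)
    last_name: str = ormar.String(max_length=50)

    # sets __property_field__ so the name lands in Meta.property_fields
    @ormar.property_field  # location of the decorator assumed
    def full_name(self) -> str:
        return f"{self.first_name} {self.last_name}"


assert "full_name" in Employee.Meta.property_fields
# dict() output (and therefore fastapi responses) will also include "full_name"
```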
def register_signals(new_model: Type["Model"]) -> None:  # noqa: CCR001
"""
Registers the model's SignalEmitter and sets predefined signals.
Predefined signals are (pre/post) + (save/update/delete).
Signals are emitted in both model own methods and in selected queryset ones.
:param new_model: newly constructed model
:type new_model: Model class
"""
if meta_field_not_set(model=new_model, field_name="signals"): if meta_field_not_set(model=new_model, field_name="signals"):
signals = SignalEmitter() signals = SignalEmitter()
signals.pre_save = Signal() signals.pre_save = Signal()
@ -393,36 +207,395 @@ def register_signals(new_model: Type["Model"]) -> None: # noqa: CCR001
new_model.Meta.signals = signals
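A small sketch of connecting a receiver to one of the predefined signals registered above; the Invoice model is invented, and the connect() call assumes the Signal class exposes that method.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Invoice(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    total: int = ormar.Integer(default=0)


# Receivers get sender (the model class) and instance, mirroring the
# pre_delete.send(sender=..., instance=...) call used in Model.delete() below.
async def log_before_save(sender, instance, **kwargs):
    print(f"about to save {sender.get_name()} with pk={instance.pk}")


Invoice.Meta.signals.pre_save.connect(log_before_save)  # connect() API assumed
```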
def update_attrs_and_fields(
attrs: Dict,
new_attrs: Dict,
model_fields: Dict,
new_model_fields: Dict,
new_fields: Set,
) -> Dict:
"""
Updates __annotations__, values of model fields (so pydantic FieldInfos)
as well as model.Meta.model_fields definitions from parents.
:param attrs: new namespace for class being constructed
:type attrs: Dict
:param new_attrs: part of the namespace extracted from parent class
:type new_attrs: Dict
:param model_fields: ormar fields defined in the current class
:type model_fields: Dict[str, BaseField]
:param new_model_fields: ormar fields defined in parent classes
:type new_model_fields: Dict[str, BaseField]
:param new_fields: set of new fields names
:type new_fields: Set[str]
"""
key = "__annotations__"
attrs[key].update(new_attrs[key])
attrs.update({name: new_attrs[name] for name in new_fields})
updated_model_fields = {k: v for k, v in new_model_fields.items()}
updated_model_fields.update(model_fields)
return updated_model_fields
def verify_constraint_names(
base_class: "Model", model_fields: Dict, parent_value: List
) -> None:
"""
Verifies that fields redefined in subclasses did not remove any column name
that is used in a constraint, as that would fail during sqlalchemy Table
creation.
:param base_class: one of the parent classes
:type base_class: Model or model parent class
:param model_fields: ormar fields defined in the current class
:type model_fields: Dict[str, BaseField]
:param parent_value: list of base class constraints
:type parent_value: List
"""
new_aliases = {x.name: x.get_alias() for x in model_fields.values()}
old_aliases = {x.name: x.get_alias() for x in base_class.Meta.model_fields.values()}
old_aliases.update(new_aliases)
constraints_columns = [x._pending_colargs for x in parent_value]
for column_set in constraints_columns:
if any(x not in old_aliases.values() for x in column_set):
raise ModelDefinitionError(
f"Unique columns constraint "
f"{column_set} "
f"has column names "
f"that are not in the model fields."
f"\n Check columns redefined in subclasses "
f"to verify that they have proper 'name' set."
)
def update_attrs_from_base_meta( # noqa: CCR001
base_class: "Model", attrs: Dict, model_fields: Dict
) -> None:
"""
Updates Meta parameters in child from parent if needed.
:param base_class: one of the parent classes
:type base_class: Model or model parent class
:param attrs: new namespace for class being constructed
:type attrs: Dict
:param model_fields: ormar fields defined in the current class
:type model_fields: Dict[str, BaseField]
"""
params_to_update = ["metadata", "database", "constraints"]
for param in params_to_update:
current_value = attrs.get("Meta", {}).__dict__.get(param, ormar.Undefined)
parent_value = (
base_class.Meta.__dict__.get(param) if hasattr(base_class, "Meta") else None
)
if parent_value:
if param == "constraints":
verify_constraint_names(
base_class=base_class,
model_fields=model_fields,
parent_value=parent_value,
)
parent_value = [
ormar.UniqueColumns(*x._pending_colargs) for x in parent_value
]
if isinstance(current_value, list):
current_value.extend(parent_value)
else:
setattr(attrs["Meta"], param, parent_value)
def copy_and_replace_m2m_through_model(
field: Type[ManyToManyField],
field_name: str,
table_name: str,
parent_fields: Dict,
attrs: Dict,
meta: ModelMeta,
) -> None:
"""
Clones class with Through model for m2m relations, appends child name to the name
of the cloned class.
Clones non foreign keys fields from parent model, the same with database columns.
Modifies related_name with appending child table name after '_'
For table name, the table name of child is appended after '_'.
Removes the original sqlalchemy table from metadata if it was not removed.
:param field: field with relations definition
:type field: Type[ManyToManyField]
:param field_name: name of the relation field
:type field_name: str
:param table_name: name of the table
:type table_name: str
:param parent_fields: dictionary of fields to copy to new models from parent
:type parent_fields: Dict
:param attrs: new namespace for class being constructed
:type attrs: Dict
:param meta: metaclass of currently created model
:type meta: ModelMeta
"""
copy_field: Type[BaseField] = type( # type: ignore
field.__name__, (ManyToManyField, BaseField), dict(field.__dict__)
)
related_name = field.related_name + "_" + table_name
copy_field.related_name = related_name # type: ignore
through_class = field.through
new_meta: ormar.ModelMeta = type( # type: ignore
"Meta", (), dict(through_class.Meta.__dict__),
)
new_meta.tablename += "_" + meta.tablename
# create new table with copied columns but remove foreign keys
# they will be populated later in expanding reverse relation
if hasattr(new_meta, "table"):
del new_meta.table
new_meta.columns = [col for col in new_meta.columns if not col.foreign_keys]
new_meta.model_fields = {
name: field
for name, field in new_meta.model_fields.items()
if not issubclass(field, ForeignKeyField)
}
populate_meta_sqlalchemy_table_if_required(new_meta)
copy_name = through_class.__name__ + attrs.get("__name__", "")
copy_through = type(copy_name, (ormar.Model,), {"Meta": new_meta})
copy_field.through = copy_through
parent_fields[field_name] = copy_field
if through_class.Meta.table in through_class.Meta.metadata:
through_class.Meta.metadata.remove(through_class.Meta.table)
def copy_data_from_parent_model( # noqa: CCR001
base_class: Type["Model"],
curr_class: type,
attrs: Dict,
model_fields: Dict[
str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
],
) -> Tuple[Dict, Dict]:
"""
Copies the key parameters [database, metadata, property_fields and constraints]
and fields from parent models. Overwrites them if needed.
Only abstract classes can be subclassed.
Since relation fields require a different related_name for each child,
the related_name is adjusted per child class.
:raises ModelDefinitionError: if non abstract model is subclassed
:param base_class: one of the parent classes
:type base_class: Model or model parent class
:param curr_class: current constructed class
:type curr_class: Model or model parent class
:param attrs: new namespace for class being constructed
:type attrs: Dict
:param model_fields: ormar fields defined in the current class
:type model_fields: Dict[str, BaseField]
:return: updated attrs and model_fields
:rtype: Tuple[Dict, Dict]
"""
if attrs.get("Meta"):
if model_fields and not base_class.Meta.abstract: # type: ignore
raise ModelDefinitionError(
f"{curr_class.__name__} cannot inherit "
f"from non abstract class {base_class.__name__}"
)
update_attrs_from_base_meta(
base_class=base_class, # type: ignore
attrs=attrs,
model_fields=model_fields,
)
parent_fields: Dict = dict()
meta = attrs.get("Meta")
if not meta: # pragma: no cover
raise ModelDefinitionError(
f"Model {curr_class.__name__} declared without Meta"
)
table_name = (
meta.tablename
if hasattr(meta, "tablename") and meta.tablename
else attrs.get("__name__", "").lower() + "s"
)
for field_name, field in base_class.Meta.model_fields.items():
if issubclass(field, ManyToManyField):
copy_and_replace_m2m_through_model(
field=field,
field_name=field_name,
table_name=table_name,
parent_fields=parent_fields,
attrs=attrs,
meta=meta,
)
elif issubclass(field, ForeignKeyField) and field.related_name:
copy_field = type( # type: ignore
field.__name__, (ForeignKeyField, BaseField), dict(field.__dict__)
)
related_name = field.related_name + "_" + table_name
copy_field.related_name = related_name # type: ignore
parent_fields[field_name] = copy_field
else:
parent_fields[field_name] = field
parent_fields.update(model_fields) # type: ignore
model_fields = parent_fields
return attrs, model_fields
def extract_from_parents_definition( # noqa: CCR001
base_class: type,
curr_class: type,
attrs: Dict,
model_fields: Dict[
str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
],
) -> Tuple[Dict, Dict]:
"""
Extracts fields from base classes if they have valid ormar fields.
If model was already parsed -> fields definitions need to be removed from class
cause pydantic complains about field re-definition so after first child
we need to extract from __parsed_fields__ not the class itself.
If the class is parsed first time annotations and field definition is parsed
from the class.__dict__.
If the class is a ormar.Model it is skipped.
:param base_class: one of the parent classes
:type base_class: Model or model parent class
:param curr_class: current constructed class
:type curr_class: Model or model parent class
:param attrs: new namespace for class being constructed
:type attrs: Dict
:param model_fields: ormar fields defined in the current class
:type model_fields: Dict[str, BaseField]
:return: updated attrs and model_fields
:rtype: Tuple[Dict, Dict]
"""
if hasattr(base_class, "Meta"):
base_class = cast(Type["Model"], base_class)
return copy_data_from_parent_model(
base_class=base_class,
curr_class=curr_class,
attrs=attrs,
model_fields=model_fields,
)
key = "__annotations__"
if hasattr(base_class, PARSED_FIELDS_KEY):
# model was already parsed -> fields definitions need to be removed from class
# cause pydantic complains about field re-definition so after first child
# we need to extract from __parsed_fields__ not the class itself
new_attrs, new_model_fields = getattr(base_class, PARSED_FIELDS_KEY)
new_fields = set(new_model_fields.keys())
model_fields = update_attrs_and_fields(
attrs=attrs,
new_attrs=new_attrs,
model_fields=model_fields,
new_model_fields=new_model_fields,
new_fields=new_fields,
)
return attrs, model_fields
potential_fields = get_potential_fields(base_class.__dict__)
if potential_fields:
# parent model has ormar fields defined and was not parsed before
new_attrs = {key: {k: v for k, v in base_class.__dict__.get(key, {}).items()}}
new_attrs.update(potential_fields)
new_fields = set(potential_fields.keys())
for name in new_fields:
delattr(base_class, name)
new_attrs, new_model_fields = extract_annotations_and_default_vals(new_attrs)
setattr(base_class, PARSED_FIELDS_KEY, (new_attrs, new_model_fields))
model_fields = update_attrs_and_fields(
attrs=attrs,
new_attrs=new_attrs,
model_fields=model_fields,
new_model_fields=new_model_fields,
new_fields=new_fields,
)
return attrs, model_fields
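Putting the parent-extraction helpers together, a sketch of the inheritance flow they enable: an abstract parent contributes its fields, and its Meta parameters (metadata, database, constraints) are copied to children that do not set them. Model names below are invented.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class AuditModel(ormar.Model):
    class Meta:
        abstract = True
        metadata = metadata
        database = database

    id: int = ormar.Integer(primary_key=True)
    created_by: str = ormar.String(max_length=100, default="system")


class Category(AuditModel):
    class Meta:
        # metadata / database are inherited from AuditModel.Meta
        # by update_attrs_from_base_meta
        tablename = "categories"

    name: str = ormar.String(max_length=50)


assert "created_by" in Category.Meta.model_fields
# Subclassing a non-abstract model would raise ModelDefinitionError instead.
```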
class ModelMetaclass(pydantic.main.ModelMetaclass):
def __new__(  # type: ignore  # noqa: CCR001
mcs: "ModelMetaclass", name: str, bases: Any, attrs: dict
) -> "ModelMetaclass":
"""
Metaclass used by ormar Models that performs configuration
and build of ormar Models.
Sets pydantic configuration.
Extract model_fields and convert them to pydantic FieldInfo,
updates class namespace.
Extracts settings and fields from parent classes.
Fetches methods decorated with @property_field decorator
to expose them later in dict().
Construct parent pydantic Metaclass/ Model.
If class has Meta class declared (so actual ormar Models) it also:
* populate sqlalchemy columns, pkname and tables from model_fields
* register reverse relationships on related models
* registers all relations in alias manager that populates table_prefixes
* exposes alias manager on each Model
* creates QuerySet for each model and exposes it on a class
:param name: name of current class
:type name: str
:param bases: base classes
:type bases: Tuple
:param attrs: class namespace
:type attrs: Dict
"""
attrs["Config"] = get_pydantic_base_orm_config() attrs["Config"] = get_pydantic_base_orm_config()
attrs["__name__"] = name attrs["__name__"] = name
attrs, model_fields = extract_annotations_and_default_vals(attrs) attrs, model_fields = extract_annotations_and_default_vals(attrs)
for base in reversed(bases):
mod = base.__module__
if mod.startswith("ormar.models.") or mod.startswith("pydantic."):
continue
attrs, model_fields = extract_from_parents_definition(
base_class=base, curr_class=mcs, attrs=attrs, model_fields=model_fields
)
new_model = super().__new__(  # type: ignore
mcs, name, bases, attrs
)
add_cached_properties(new_model)
if hasattr(new_model, "Meta"):
populate_default_options_values(new_model, model_fields)
add_property_fields(new_model, attrs)
register_signals(new_model=new_model)
populate_choices_validators(new_model)
if not new_model.Meta.abstract:
new_model = populate_meta_tablename_columns_and_pk(name, new_model)
populate_meta_sqlalchemy_table_if_required(new_model.Meta)
expand_reverse_relationships(new_model)
for field_name, field in new_model.Meta.model_fields.items():
register_relation_in_alias_manager(new_model, field, field_name)
if new_model.Meta.pkname not in attrs["__annotations__"]:
field_name = new_model.Meta.pkname
attrs["__annotations__"][field_name] = Optional[int]  # type: ignore
attrs[field_name] = None
new_model.__fields__[field_name] = get_pydantic_field(
field_name=field_name, model=new_model
)
new_model.Meta.alias_manager = alias_manager
new_model.objects = QuerySet(new_model)
return new_model
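As an illustration of the fallback in sqlalchemy_columns_from_model_fields, a model declared without any fields gets an Integer primary key named `id` added automatically (with a warning). The model name is invented.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Heartbeat(ormar.Model):
    class Meta:
        database = database
        metadata = metadata
        tablename = "heartbeats"

    # no fields declared on purpose


assert Heartbeat.Meta.pkname == "id"
assert "id" in Heartbeat.Meta.model_fields  # auto-created Integer primary key
```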

View File

@ -0,0 +1,19 @@
"""
Package contains functionalities divided by features.
All mixins are combined into ModelTableProxy which is one of the parents of Model.
The split into mixins was done to ease the maintainability of the proxy class, as
it became quite complicated over time.
"""
from ormar.models.mixins.alias_mixin import AliasMixin
from ormar.models.mixins.excludable_mixin import ExcludableMixin
from ormar.models.mixins.merge_mixin import MergeModelMixin
from ormar.models.mixins.prefetch_mixin import PrefetchQueryMixin
from ormar.models.mixins.save_mixin import SavePrepareMixin
__all__ = [
"MergeModelMixin",
"AliasMixin",
"PrefetchQueryMixin",
"SavePrepareMixin",
"ExcludableMixin",
]

View File

@ -0,0 +1,72 @@
from typing import Dict, TYPE_CHECKING
class AliasMixin:
"""
Used to translate field names into database column names.
"""
if TYPE_CHECKING: # pragma: no cover
from ormar import ModelMeta
Meta: ModelMeta
@classmethod
def get_column_alias(cls, field_name: str) -> str:
"""
Returns db alias (column name in db) for given ormar field.
For fields without alias field name is returned.
:param field_name: name of the field to get alias from
:type field_name: str
:return: alias (db name) if set, otherwise passed name
:rtype: str
"""
field = cls.Meta.model_fields.get(field_name)
return field.get_alias() if field is not None else field_name
@classmethod
def get_column_name_from_alias(cls, alias: str) -> str:
"""
Returns ormar field name for given db alias (column name in db).
If no field has this alias, the passed value is returned as is.
:param alias:
:type alias: str
:return: field name if set, otherwise passed alias (db name)
:rtype: str
"""
for field_name, field in cls.Meta.model_fields.items():
if field.get_alias() == alias:
return field_name
return alias # if not found it's not an alias but actual name
@classmethod
def translate_columns_to_aliases(cls, new_kwargs: Dict) -> Dict:
"""
Translates dictionary of model fields changing field names into aliases.
If field has no alias the field name remains intact.
Only fields present in the dictionary are translated.
:param new_kwargs: dict with fields names and their values
:type new_kwargs: Dict
:return: dict with aliases and their values
:rtype: Dict
"""
for field_name, field in cls.Meta.model_fields.items():
if field_name in new_kwargs:
new_kwargs[field.get_alias()] = new_kwargs.pop(field_name)
return new_kwargs
@classmethod
def translate_aliases_to_columns(cls, new_kwargs: Dict) -> Dict:
"""
Translates dictionary of model fields changing aliases into field names.
If field has no alias the alias is already a field name.
Only fields present in the dictionary are translated.
:param new_kwargs: dict with aliases and their values
:type new_kwargs: Dict
:return: dict with fields names and their values
:rtype: Dict
"""
for field_name, field in cls.Meta.model_fields.items():
if field.alias and field.alias in new_kwargs:
new_kwargs[field_name] = new_kwargs.pop(field.alias)
return new_kwargs
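A sketch of the alias translation helpers above, assuming the `name=` parameter on a field sets its database column alias; the Company model is invented.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Company(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    # stored in the database under a different column name
    founded: int = ormar.Integer(name="founded_year", nullable=True)


assert Company.get_column_alias("founded") == "founded_year"
assert Company.get_column_name_from_alias("founded_year") == "founded"
assert Company.translate_columns_to_aliases({"founded": 1999}) == {"founded_year": 1999}
```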

View File

@ -0,0 +1,288 @@
from typing import (
AbstractSet,
Any,
Dict,
List,
Mapping,
Optional,
Set,
TYPE_CHECKING,
Type,
TypeVar,
Union,
)
from ormar.models.mixins.relation_mixin import RelationMixin
from ormar.queryset.utils import translate_list_to_dict, update
if TYPE_CHECKING: # pragma no cover
from ormar import Model
T = TypeVar("T", bound=Model)
IntStr = Union[int, str]
AbstractSetIntStr = AbstractSet[IntStr]
MappingIntStrAny = Mapping[IntStr, Any]
class ExcludableMixin(RelationMixin):
"""
Used to include/exclude given set of fields on models during load and dict() calls.
"""
if TYPE_CHECKING: # pragma: no cover
from ormar import Model
@staticmethod
def get_child(
items: Union[Set, Dict, None], key: str = None
) -> Union[Set, Dict, None]:
"""
Used to get nested dictionary keys if they exist, otherwise returns
the passed items.
:param items: bag of items to include or exclude
:type items: Union[Set, Dict, None]
:param key: name of the child to extract
:type key: str
:return: child extracted from items if exists
:rtype: Union[Set, Dict, None]
"""
if isinstance(items, dict):
return items.get(key, {})
return items
@staticmethod
def get_excluded(
exclude: Union[Set, Dict, None], key: str = None
) -> Union[Set, Dict, None]:
"""
Proxy to ExcludableMixin.get_child for exclusions.
:param exclude: bag of items to exclude
:type exclude: Union[Set, Dict, None]
:param key: name of the child to extract
:type key: str
:return: child extracted from items if exists
:rtype: Union[Set, Dict, None]
"""
return ExcludableMixin.get_child(items=exclude, key=key)
@staticmethod
def get_included(
include: Union[Set, Dict, None], key: str = None
) -> Union[Set, Dict, None]:
"""
Proxy to ExcludableMixin.get_child for inclusions.
:param include: bag of items to include
:type include: Union[Set, Dict, None]
:param key: name of the child to extract
:type key: str
:return: child extracted from items if exists
:rtype: Union[Set, Dict, None]
"""
return ExcludableMixin.get_child(items=include, key=key)
@staticmethod
def is_excluded(exclude: Union[Set, Dict, None], key: str = None) -> bool:
"""
Checks if given key should be excluded on model/ dict.
:param exclude: bag of items to exclude
:type exclude: Union[Set, Dict, None]
:param key: name of the child to extract
:type key: str
:return: result of the check
:rtype: bool
"""
if exclude is None:
return False
if exclude is Ellipsis: # pragma: nocover
return True
to_exclude = ExcludableMixin.get_excluded(exclude=exclude, key=key)
if isinstance(to_exclude, Set):
return key in to_exclude
if to_exclude is ...:
return True
return False
@staticmethod
def is_included(include: Union[Set, Dict, None], key: str = None) -> bool:
"""
Checks if given key should be included on model/ dict.
:param include: bag of items to include
:type include: Union[Set, Dict, None]
:param key: name of the child to extract
:type key: str
:return: result of the check
:rtype: bool
"""
if include is None:
return True
if include is Ellipsis:
return True
to_include = ExcludableMixin.get_included(include=include, key=key)
if isinstance(to_include, Set):
return key in to_include
if to_include is ...:
return True
return False
@staticmethod
def _populate_pk_column(
model: Type["Model"], columns: List[str], use_alias: bool = False,
) -> List[str]:
"""
Adds primary key column/alias (depends on use_alias flag) to list of
column names that are selected.
:param model: model on which columns are selected
:type model: Type["Model"]
:param columns: list of columns names
:type columns: List[str]
:param use_alias: flag to set if aliases or field names should be used
:type use_alias: bool
:return: list of columns names with pk column in it
:rtype: List[str]
"""
pk_alias = (
model.get_column_alias(model.Meta.pkname)
if use_alias
else model.Meta.pkname
)
if pk_alias not in columns:
columns.append(pk_alias)
return columns
@classmethod
def own_table_columns(
cls,
model: Type["Model"],
fields: Optional[Union[Set, Dict]],
exclude_fields: Optional[Union[Set, Dict]],
use_alias: bool = False,
) -> List[str]:
"""
Returns list of aliases or field names for given model.
The use_alias flag switches between aliases and field names.
If provided only fields included in fields will be returned.
If provided fields in exclude_fields will be excluded in return.
Primary key field is always added and cannot be excluded (will be added anyway).
:param model: model on which columns are selected
:type model: Type["Model"]
:param fields: set/dict of fields to include
:type fields: Optional[Union[Set, Dict]]
:param exclude_fields: set/dict of fields to exclude
:type exclude_fields: Optional[Union[Set, Dict]]
:param use_alias: flag if aliases or field names should be used
:type use_alias: bool
:return: list of column field names or aliases
:rtype: List[str]
"""
columns = [
model.get_column_name_from_alias(col.name) if not use_alias else col.name
for col in model.Meta.table.columns
]
field_names = [
model.get_column_name_from_alias(col.name)
for col in model.Meta.table.columns
]
if fields:
columns = [
col
for col, name in zip(columns, field_names)
if model.is_included(fields, name)
]
if exclude_fields:
columns = [
col
for col, name in zip(columns, field_names)
if not model.is_excluded(exclude_fields, name)
]
# always has to return pk column for ormar to work
columns = cls._populate_pk_column(
model=model, columns=columns, use_alias=use_alias
)
return columns
@classmethod
def _update_excluded_with_related_not_required(
cls,
exclude: Union["AbstractSetIntStr", "MappingIntStrAny", None],
nested: bool = False,
) -> Union[Set, Dict]:
"""
Used during generation of the dict().
To avoid cyclical references and hitting the max recursion limit, nested models
have to exclude related models that are not mandatory.
For a main model (not nested) only nullable related field names are added to
the exclusion, for nested models all related models are excluded.
:param exclude: set/dict with fields to exclude
:type exclude: Union[Set, Dict, None]
:param nested: flag setting nested models (child of previous one, not main one)
:type nested: bool
:return: set or dict with excluded fields added.
:rtype: Union[Set, Dict]
"""
exclude = exclude or {}
related_set = cls._exclude_related_names_not_required(nested=nested)
if isinstance(exclude, set):
exclude = exclude.union(related_set)
else:
related_dict = translate_list_to_dict(related_set)
exclude = update(related_dict, exclude)
return exclude
@classmethod
def get_names_to_exclude(
cls,
fields: Optional[Union[Dict, Set]] = None,
exclude_fields: Optional[Union[Dict, Set]] = None,
) -> Set:
"""
Returns a set of models field names that should be explicitly excluded
during model initialization.
Those fields will be set to None to avoid ormar/pydantic setting default
values on them. They should be returned as None in any case.
Used in parsing data from database rows that construct Models by initializing
them with dicts constructed from those db rows.
:param fields: set/dict of fields to include
:type fields: Optional[Union[Set, Dict]]
:param exclude_fields: set/dict of fields to exclude
:type exclude_fields: Optional[Union[Set, Dict]]
:return: set of field names that should be excluded
:rtype: Set
"""
fields_names = cls.extract_db_own_fields()
if fields and fields is not Ellipsis:
fields_to_keep = {name for name in fields if name in fields_names}
else:
fields_to_keep = fields_names
fields_to_exclude = fields_names - fields_to_keep
if isinstance(exclude_fields, Set):
fields_to_exclude = fields_to_exclude.union(
{name for name in exclude_fields if name in fields_names}
)
elif isinstance(exclude_fields, Dict):
new_to_exclude = {
name
for name in exclude_fields
if name in fields_names and exclude_fields[name] is Ellipsis
}
fields_to_exclude = fields_to_exclude.union(new_to_exclude)
fields_to_exclude = fields_to_exclude - {cls.Meta.pkname}
return fields_to_exclude
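To make the include/exclude rules concrete, a short example of own_table_columns on an invented model: the primary key is re-added even when not requested.

```python
import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Ticket(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=100)
    body: str = ormar.String(max_length=500, nullable=True)


cols = Ticket.own_table_columns(model=Ticket, fields={"title"}, exclude_fields=None)
assert set(cols) == {"id", "title"}  # pk is always included

cols = Ticket.own_table_columns(model=Ticket, fields=None, exclude_fields={"body"})
assert set(cols) == {"id", "title"}
```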

View File

@ -0,0 +1,79 @@
from collections import OrderedDict
from typing import List, Sequence, TYPE_CHECKING
import ormar
if TYPE_CHECKING: # pragma no cover
from ormar import Model
class MergeModelMixin:
"""
Used to merge model instances returned by the database
that are already initialized as ormar Models.
Models can duplicate during joins when parent model has multiple child rows,
in the end all parent (main) models should be unique.
"""
@classmethod
def merge_instances_list(cls, result_rows: Sequence["Model"]) -> Sequence["Model"]:
"""
Merges a list of models into list of unique models.
Models can duplicate during joins when parent model has multiple child rows,
in the end all parent (main) models should be unique.
:param result_rows: list of already initialized Models with child models
populated, each instance is one row in db and some models can duplicate
:type result_rows: List["Model"]
:return: list of merged models where each main model is unique
:rtype: List["Model"]
"""
merged_rows: List["Model"] = []
grouped_instances: OrderedDict = OrderedDict()
for model in result_rows:
grouped_instances.setdefault(model.pk, []).append(model)
for group in grouped_instances.values():
model = group.pop(0)
if group:
for next_model in group:
model = cls.merge_two_instances(next_model, model)
merged_rows.append(model)
return merged_rows
@classmethod
def merge_two_instances(cls, one: "Model", other: "Model") -> "Model":
"""
Merges current (other) Model and previous one (one) and returns the current
Model instance with data merged from previous one.
If needed it calls itself recursively and merges children models as well.
:param one: previous model instance
:type one: Model
:param other: current model instance
:type other: Model
:return: current Model instance with data merged from previous one.
:rtype: Model
"""
for field in one.Meta.model_fields.keys():
current_field = getattr(one, field)
if isinstance(current_field, list) and not isinstance(
current_field, ormar.Model
):
setattr(other, field, current_field + getattr(other, field))
elif (
isinstance(current_field, ormar.Model)
and current_field.pk == getattr(other, field).pk
):
setattr(
other,
field,
cls.merge_two_instances(current_field, getattr(other, field)),
)
other.set_save_status(True)
return other

View File

@ -0,0 +1,122 @@
from typing import Callable, Dict, List, TYPE_CHECKING, Tuple, Type
import ormar
from ormar.fields import BaseField
from ormar.models.mixins.relation_mixin import RelationMixin
class PrefetchQueryMixin(RelationMixin):
"""
Used in PrefetchQuery to extract ids and names of models to prefetch.
"""
if TYPE_CHECKING: # pragma no cover
from ormar import Model
get_name: Callable # defined in NewBaseModel
@staticmethod
def get_clause_target_and_filter_column_name(
parent_model: Type["Model"],
target_model: Type["Model"],
reverse: bool,
related: str,
) -> Tuple[Type["Model"], str]:
"""
Returns Model on which query clause should be performed and name of the column.
:param parent_model: related model that the relation leads to
:type parent_model: Type[Model]
:param target_model: model on which the query should be performed
:type target_model: Type[Model]
:param reverse: flag if the relation is reverse
:type reverse: bool
:param related: name of the relation field
:type related: str
:return: Model on which query clause should be performed and name of the column
:rtype: Tuple[Type[Model], str]
"""
if reverse:
field_name = (
parent_model.Meta.model_fields[related].related_name
or parent_model.get_name() + "s"
)
field = target_model.Meta.model_fields[field_name]
if issubclass(field, ormar.fields.ManyToManyField):
field_name = field.default_target_field_name()
sub_field = field.through.Meta.model_fields[field_name]
return field.through, sub_field.get_alias()
return target_model, field.get_alias()
target_field = target_model.get_column_alias(target_model.Meta.pkname)
return target_model, target_field
@staticmethod
def get_column_name_for_id_extraction(
parent_model: Type["Model"], reverse: bool, related: str, use_raw: bool,
) -> str:
"""
Returns name of the column that should be used to extract ids from model.
Depending on the relation side it's either primary key column of parent model
or field name specified by related parameter.
:param parent_model: model from which id column should be extracted
:type parent_model: Type[Model]
:param reverse: flag if the relation is reverse
:type reverse: bool
:param related: name of the relation field
:type related: str
:param use_raw: flag if aliases or field names should be used
:type use_raw: bool
:return: name of the column to extract ids from
:rtype: str
"""
if reverse:
column_name = parent_model.Meta.pkname
return (
parent_model.get_column_alias(column_name) if use_raw else column_name
)
column = parent_model.Meta.model_fields[related]
return column.get_alias() if use_raw else column.name
@classmethod
def get_related_field_name(cls, target_field: Type["BaseField"]) -> str:
"""
Returns name of the relation field that should be used in prefetch query.
This field is later used to register relation in prefetch query,
populate relations dict, and populate nested model in prefetch query.
:param target_field: relation field that should be used in prefetch
:type target_field: Type[BaseField]
:return: name of the field
:rtype: str
"""
if issubclass(target_field, ormar.fields.ManyToManyField):
return cls.get_name()
if target_field.virtual:
return target_field.related_name or cls.get_name() + "s"
return target_field.to.Meta.pkname
@classmethod
def get_filtered_names_to_extract(cls, prefetch_dict: Dict) -> List:
"""
Returns list of related fields names that should be followed to prefetch related
models from.
List of models is translated into dict to assure each model is extracted only
once in one query, that's why this function accepts prefetch_dict not list.
Only relations from current model are returned.
:param prefetch_dict: dictionary of fields to extract
:type prefetch_dict: Dict
:return: list of fields names to extract
:rtype: List
"""
related_to_extract = []
if prefetch_dict and prefetch_dict is not Ellipsis:
related_to_extract = [
related
for related in cls.extract_related_names()
if related in prefetch_dict
]
return related_to_extract
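These helpers back prefetch_related; a sketch of the reverse relation they resolve, with invented models and the standard queryset API assumed.

```python
from typing import Optional

import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Writer(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Post(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=100)
    writer: Optional[Writer] = ormar.ForeignKey(Writer, related_name="posts")


async def load_writers_with_posts():
    # a separate query fetches the posts; the PrefetchQueryMixin helpers decide
    # which column (the fk alias of "writer") to filter it by
    return await Writer.objects.prefetch_related("posts").all()
```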

View File

@ -0,0 +1,107 @@
import inspect
from typing import List, Optional, Set, TYPE_CHECKING
from ormar.fields.foreign_key import ForeignKeyField
class RelationMixin:
"""
Used to return relation fields/names etc. from given model
"""
if TYPE_CHECKING: # pragma no cover
from ormar import ModelMeta
Meta: ModelMeta
_related_names: Optional[Set]
_related_fields: Optional[List]
@classmethod
def extract_db_own_fields(cls) -> Set:
"""
Returns only fields that are stored in the own database table, exclude all
related fields.
:return: set of model fields with relation fields excluded
:rtype: Set
"""
related_names = cls.extract_related_names()
self_fields = {
name for name in cls.Meta.model_fields.keys() if name not in related_names
}
return self_fields
@classmethod
def extract_related_fields(cls) -> List:
"""
Returns List of ormar Fields for all relations declared on a model.
List is cached in cls._related_fields for quicker access.
:return: list of related fields
:rtype: List
"""
if isinstance(cls._related_fields, List):
return cls._related_fields
related_fields = []
for name in cls.extract_related_names():
related_fields.append(cls.Meta.model_fields[name])
cls._related_fields = related_fields
return related_fields
@classmethod
def extract_related_names(cls) -> Set:
"""
Returns a set of field names for all relations declared on a model.
The set is cached in cls._related_names for quicker access.
:return: set of related field names
:rtype: Set
"""
if isinstance(cls._related_names, Set):
return cls._related_names
related_names = set()
for name, field in cls.Meta.model_fields.items():
if inspect.isclass(field) and issubclass(field, ForeignKeyField):
related_names.add(name)
cls._related_names = related_names
return related_names
@classmethod
def _extract_db_related_names(cls) -> Set:
"""
Returns only fields that are stored in the own database table, exclude
related fields that are not stored as foreign keys on given model.
:return: set of model fields with non fk relation fields excluded
:rtype: Set
"""
related_names = cls.extract_related_names()
related_names = {
name
for name in related_names
if cls.Meta.model_fields[name].is_valid_uni_relation()
}
return related_names
@classmethod
def _exclude_related_names_not_required(cls, nested: bool = False) -> Set:
"""
Returns a set of non mandatory related models field names.
For a main model (not nested) only nullable related field names are returned,
for nested models all related models are returned.
:param nested: flag setting nested models (child of previous one, not main one)
:type nested: bool
:return: set of non mandatory related fields
:rtype: Set
"""
if nested:
return cls.extract_related_names()
related_names = cls.extract_related_names()
related_names = {
name for name in related_names if cls.Meta.model_fields[name].nullable
}
return related_names

View File

@ -0,0 +1,111 @@
from typing import Dict
import ormar
from ormar.exceptions import ModelPersistenceError
from ormar.models.mixins import AliasMixin
from ormar.models.mixins.relation_mixin import RelationMixin
class SavePrepareMixin(RelationMixin, AliasMixin):
"""
Used to prepare models to be saved in database
"""
@classmethod
def prepare_model_to_save(cls, new_kwargs: dict) -> dict:
"""
Combines all preparation methods before saving.
Removes the primary key if it's a nullable or autoincrement pk field
and it's set to None.
Substitutes related models with their primary key values as the fk column.
Populates the default values for fields with a default set and no value.
Translates columns into aliases (db names).
:param new_kwargs: dictionary of model that is about to be saved
:type new_kwargs: Dict[str, str]
:return: dictionary of model that is about to be saved
:rtype: Dict[str, str]
"""
new_kwargs = cls._remove_pk_from_kwargs(new_kwargs)
new_kwargs = cls.substitute_models_with_pks(new_kwargs)
new_kwargs = cls.populate_default_values(new_kwargs)
new_kwargs = cls.translate_columns_to_aliases(new_kwargs)
return new_kwargs
@classmethod
def _remove_pk_from_kwargs(cls, new_kwargs: dict) -> dict:
"""
Removes the primary key if it's a nullable or autoincrement pk field
and it's set to None.
:param new_kwargs: dictionary of model that is about to be saved
:type new_kwargs: Dict[str, str]
:return: dictionary of model that is about to be saved
:rtype: Dict[str, str]
"""
pkname = cls.Meta.pkname
pk = cls.Meta.model_fields[pkname]
if new_kwargs.get(pkname, ormar.Undefined) is None and (
pk.nullable or pk.autoincrement
):
del new_kwargs[pkname]
return new_kwargs
@classmethod
def substitute_models_with_pks(cls, model_dict: Dict) -> Dict: # noqa CCR001
"""
Receives dictionary of model that is about to be saved and changes all related
models that are stored as foreign keys to their fk value.
:param model_dict: dictionary of model that is about to be saved
:type model_dict: Dict
:return: dictionary of model that is about to be saved
:rtype: Dict
"""
for field in cls.extract_related_names():
field_value = model_dict.get(field, None)
if field_value is not None:
target_field = cls.Meta.model_fields[field]
target_pkname = target_field.to.Meta.pkname
if isinstance(field_value, ormar.Model):
pk_value = getattr(field_value, target_pkname)
if not pk_value:
raise ModelPersistenceError(
f"You cannot save {field_value.get_name()} "
f"model without pk set!"
)
model_dict[field] = pk_value
elif field_value: # nested dict
if isinstance(field_value, list):
model_dict[field] = [
target.get(target_pkname) for target in field_value
]
else:
model_dict[field] = field_value.get(target_pkname)
else:
model_dict.pop(field, None)
return model_dict
@classmethod
def populate_default_values(cls, new_kwargs: Dict) -> Dict:
"""
Receives dictionary of model that is about to be saved and populates the default
value on the fields that have the default value set, but no actual value was
passed by the user.
:param new_kwargs: dictionary of model that is about to be saved
:type new_kwargs: Dict
:return: dictionary of model that is about to be saved
:rtype: Dict
"""
for field_name, field in cls.Meta.model_fields.items():
if (
field_name not in new_kwargs
and field.has_default(use_server=False)
and not field.pydantic_only
):
new_kwargs[field_name] = field.get_default()
# clear fields with server_default set as None
if field.server_default is not None and not new_kwargs.get(field_name):
new_kwargs.pop(field_name, None)
return new_kwargs
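An illustration of the pk substitution above: passing an unsaved related instance (no pk) makes substitute_models_with_pks raise ModelPersistenceError. Models are invented.

```python
from typing import Optional

import databases
import ormar
import sqlalchemy

database = databases.Database("sqlite:///test.db")
metadata = sqlalchemy.MetaData()


class Person(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Article(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    title: str = ormar.String(max_length=100)
    author: Optional[Person] = ormar.ForeignKey(Person)


unsaved = Person(name="Alice")  # pk is still None
saved = Person(id=1, name="Bob")

assert Article.substitute_models_with_pks({"title": "x", "author": saved}) == {
    "title": "x",
    "author": 1,
}
# Article.substitute_models_with_pks({"title": "x", "author": unsaved})
# would raise ModelPersistenceError - the related model has no pk set
```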

View File

@ -22,6 +22,20 @@ from ormar.models.metaclass import ModelMeta
def group_related_list(list_: List) -> Dict:
"""
Translates the list of related strings into a dictionary.
That way nested models are grouped to traverse them in the right order
and to avoid repetition.
Sample: ["people__houses", "people__cars__models", "people__cars__colors"]
will become:
{'people': {'houses': [], 'cars': ['models', 'colors']}}
:param list_: list of related models used in select related
:type list_: List[str]
:return: list converted to dictionary to avoid repetition and group nested models
:rtype: Dict[str, List]
"""
test_dict: Dict[str, Any] = dict()
grouped = itertools.groupby(list_, key=lambda x: x.split("__")[0])
for key, group in grouped:
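The docstring's sample as a runnable check; the import path below is assumed (this file is expected to live at ormar.models.model).

```python
from ormar.models.model import group_related_list  # module path assumed

assert group_related_list(
    ["people__houses", "people__cars__models", "people__cars__colors"]
) == {"people": {"houses": [], "cars": ["models", "colors"]}}
```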
@ -63,7 +77,38 @@ class Model(NewBaseModel):
fields: Optional[Union[Dict, Set]] = None,
exclude_fields: Optional[Union[Dict, Set]] = None,
) -> Optional[T]:
"""
Model method to convert raw sql row from database into ormar.Model instance.
Traverses nested models if they were specified in select_related for query.
Called recursively and returns a model instance if it's present in the row.
Note that it's processing one row at a time, so if there are duplicates of
parent row that needs to be joined/combined
(like parent row in sql join with 2+ child rows)
instances populated in this method are later combined in the QuerySet.
Other method working directly on raw database results is in prefetch_query,
where rows are populated in a different way as they do not have
nested models in result.
:param row: raw result row from the database
:type row: sqlalchemy.engine.result.ResultProxy
:param select_related: list of names of related models fetched from database
:type select_related: List
:param related_models: list or dict of related models
:type related_models: Union[List, Dict]
:param previous_model: internal param for nested models to specify table_prefix
:type previous_model: Model class
:param related_name: internal parameter - name of current nested model
:type related_name: str
:param fields: fields and related model fields to include
if provided only those are included
:type fields: Optional[Union[Dict, Set]]
:param exclude_fields: fields and related model fields to exclude
excludes the fields even if they are provided in fields
:type exclude_fields: Optional[Union[Dict, Set]]
:return: returns model if model is populated from database
:rtype: Optional[Model]
"""
item: Dict[str, Any] = {}
select_related = select_related or []
related_models = related_models or []
@ -80,13 +125,11 @@ class Model(NewBaseModel):
)
):
through_field = previous_model.Meta.model_fields[related_name]
rel_name2 = through_field.default_target_field_name()  # type: ignore
previous_model = through_field.through  # type: ignore
if previous_model and rel_name2:
table_prefix = cls.Meta.alias_manager.resolve_relation_alias(
previous_model, rel_name2
)
else:
@ -127,6 +170,32 @@ class Model(NewBaseModel):
fields: Optional[Union[Dict, Set]] = None,
exclude_fields: Optional[Union[Dict, Set]] = None,
) -> dict:
"""
Traverses structure of related models and populates the nested models
from the database row.
Related models can be a list if only directly related models are to be
populated, converted to dict if related models also have their own related
models to be populated.
Recursively calls the from_row method on nested instances and creates nested
instances. In the end those instances are added to the final model dictionary.
:param item: dictionary of already populated nested models, otherwise empty dict
:type item: Dict
:param row: raw result row from the database
:type row: sqlalchemy.engine.result.ResultProxy
:param related_models: list or dict of related models
:type related_models: Union[Dict, List]
:param fields: fields and related model fields to include -
if provided only those are included
:type fields: Optional[Union[Dict, Set]]
:param exclude_fields: fields and related model fields to exclude
excludes the fields even if they are provided in fields
:type exclude_fields: Optional[Union[Dict, Set]]
:return: dictionary with keys corresponding to model fields names
and values are database values
:rtype: Dict
"""
for related in related_models:
if isinstance(related_models, dict) and related_models[related]:
first_part, remainder = related, related_models[related]
@ -168,7 +237,36 @@ class Model(NewBaseModel):
fields: Optional[Union[Dict, Set]] = None,
exclude_fields: Optional[Union[Dict, Set]] = None,
) -> dict:
"""
Extracts own fields from raw sql result, using a given prefix.
Prefix changes depending on the table's position in a join.
If the table is a main table, there is no prefix.
All joined tables have prefixes to allow duplicate column names,
as well as duplicated joins to the same table from multiple different tables.
Extracted fields populates the item dict later used to construct a Model.
Used in Model.from_row and PrefetchQuery._populate_rows methods.
:param item: dictionary of already populated nested models, otherwise empty dict
:type item: Dict
:param row: raw result row from the database
:type row: sqlalchemy.engine.result.ResultProxy
:param table_prefix: prefix of the table from AliasManager
each pair of tables have own prefix (two of them depending on direction) -
used in joins to allow multiple joins to the same table.
:type table_prefix: str
:param fields: fields and related model fields to include -
if provided only those are included
:type fields: Optional[Union[Dict, Set]]
:param exclude_fields: fields and related model fields to exclude
excludes the fields even if they are provided in fields
:type exclude_fields: Optional[Union[Dict, Set]]
:return: dictionary with keys corresponding to model fields names
and values are database values
:rtype: Dict
"""
# databases does not keep aliases in Record for postgres, change to raw row
source = row._row if cls.db_backend_name() == "postgresql" else row
@ -190,17 +288,49 @@ class Model(NewBaseModel):
return item
async def upsert(self: T, **kwargs: Any) -> T:
"""
Performs either a save or an update depending on the presence of the pk.
If the pk field is filled it's an update, otherwise the save is performed.
For save kwargs are ignored, used only in update if provided.
:param kwargs: list of fields to update
:type kwargs: Any
:return: saved Model
:rtype: Model
"""
if not self.pk:
return await self.save()
return await self.update(**kwargs)
async def save(self: T) -> T:
"""
Performs a save of given Model instance.
If primary key is already saved, db backend will throw integrity error.
Related models are saved by pk number, reverse relation and many to many fields
are not saved - use corresponding relations methods.
If there are fields with server_default set and those fields
are not already filled, save will also trigger a second query
to refresh the fields populated server side.
Does not recognize if model was previously saved.
If you want to perform update or insert depending on the pk
fields presence use upsert.
Sends pre_save and post_save signals.
Sets model save status to True.
:return: saved Model
:rtype: Model
"""
self_fields = self._extract_model_db_fields()
if not self.pk and self.Meta.model_fields[self.Meta.pkname].autoincrement:
self_fields.pop(self.Meta.pkname, None)
self_fields = self.populate_default_values(self_fields)
self.update_from_dict(
{
k: v
for k, v in self_fields.items()
@ -233,6 +363,33 @@ class Model(NewBaseModel):
async def save_related(  # noqa: CCR001
self, follow: bool = False, visited: Set = None, update_count: int = 0
) -> int:  # noqa: CCR001
"""
Triggers an upsert method on all related models
if the instances are not already saved.
By default saves only the directly related ones.
If follow=True is set it saves also related models of related models.
To not get stuck in an infinite loop as related models also keep a relation
to parent model visited models set is kept.
That way already visited models that are nested are saved, but the save does not
follow them further. So Model A -> Model B -> Model A -> Model C will save the second
Model A but will never follow into Model C.
Nested relations of those kind need to be persisted manually.
:param follow: flag to trigger deep save -
by default only directly related models are saved
with follow=True also related models of related models are saved
:type follow: bool
:param visited: internal parameter for recursive calls - already visited models
:type visited: Set
:param update_count: internal parameter for recursive calls -
number of updated instances
:type update_count: int
:return: number of updated/saved models
:rtype: int
"""
if not visited:
visited = {self.__class__}
else:
@ -263,6 +420,24 @@ class Model(NewBaseModel):
async def _update_and_follow(
rel: T, follow: bool, visited: Set, update_count: int
) -> Tuple[int, Set]:
"""
Internal method used in save_related to follow related models and update numbers
of updated related instances.
:param rel: Model to follow
:type rel: Model
:param follow: flag to trigger deep save -
by default only directly related models are saved
with follow=True also related models of related models are saved
:type follow: bool
:param visited: internal parameter for recursive calls - already visited models
:type visited: Set
:param update_count: internal parameter for recursive calls -
number of updated instances
:type update_count: int
:return: tuple of update count and visited
:rtype: Tuple[int, Set]
"""
if follow and rel.__class__ not in visited: if follow and rel.__class__ not in visited:
update_count = await rel.save_related( update_count = await rel.save_related(
follow=follow, visited=visited, update_count=update_count follow=follow, visited=visited, update_count=update_count
@ -273,8 +448,23 @@ class Model(NewBaseModel):
return update_count, visited return update_count, visited
async def update(self: T, **kwargs: Any) -> T: async def update(self: T, **kwargs: Any) -> T:
"""
Performs update of Model instance in the database.
Fields can be updated beforehand on the instance, or passed as kwargs.
Sends pre_update and post_update signals.
Sets model save status to True.
:raises ModelPersistenceError: If the pk column is not set
:param kwargs: list of fields to update as field=value pairs
:type kwargs: Any
:return: updated Model
:rtype: Model
"""
if kwargs: if kwargs:
self.from_dict(kwargs) self.update_from_dict(kwargs)
if not self.pk: if not self.pk:
raise ModelPersistenceError( raise ModelPersistenceError(
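A short hedged sketch of the two ways to call update() described above, again reusing the illustrative Track model.

```python
async def update_demo() -> None:
    track = await Track.objects.get(title="Intro")

    # pass the new values directly ...
    await track.update(title="Intro (remastered)")

    # ... or mutate the instance first and call update() without kwargs
    track.title = "Intro (final)"
    await track.update()
```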
@ -294,6 +484,20 @@ class Model(NewBaseModel):
return self return self
async def delete(self: T) -> int: async def delete(self: T) -> int:
"""
Removes the Model instance from the database.
Sends pre_delete and post_delete signals.
Sets model save status to False.
Note that it does not delete the Model instance itself (the python object).
So you can delete it and later save it again (since the pk is cleared, no conflict will
arise), or update it and the Model will be saved in the database again.
:return: number of deleted rows (for some backends)
:rtype: int
"""
await self.signals.pre_delete.send(sender=self.__class__, instance=self) await self.signals.pre_delete.send(sender=self.__class__, instance=self)
expr = self.Meta.table.delete() expr = self.Meta.table.delete()
expr = expr.where(self.pk_column == (getattr(self, self.Meta.pkname))) expr = expr.where(self.pk_column == (getattr(self, self.Meta.pkname)))
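A hedged sketch of delete(); the claim that the instance can be saved again afterwards comes from the docstring above and is assumed rather than verified.

```python
async def delete_demo() -> None:
    track = await Track.objects.get(title="Intro (final)")

    await track.delete()            # the row is removed, the python object survives
    assert track.saved is False     # delete() flips the save status

    await track.save()              # per the docstring, the object can be persisted again
```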
@ -303,12 +507,22 @@ class Model(NewBaseModel):
return result return result
async def load(self: T) -> T: async def load(self: T) -> T:
"""
Allows refreshing an existing Model's fields from the database.
Be careful, as the related models can be overwritten by pk_only models during load.
Does NOT refresh the related models' fields if they were loaded before.
:raises NoMatch: If given pk is not found in database.
:return: reloaded Model
:rtype: Model
"""
expr = self.Meta.table.select().where(self.pk_column == self.pk) expr = self.Meta.table.select().where(self.pk_column == self.pk)
row = await self.Meta.database.fetch_one(expr) row = await self.Meta.database.fetch_one(expr)
if not row: # pragma nocover if not row: # pragma nocover
raise NoMatch("Instance was deleted from database and cannot be refreshed") raise NoMatch("Instance was deleted from database and cannot be refreshed")
kwargs = dict(row) kwargs = dict(row)
kwargs = self.translate_aliases_to_columns(kwargs) kwargs = self.translate_aliases_to_columns(kwargs)
self.from_dict(kwargs) self.update_from_dict(kwargs)
self.set_save_status(True) self.set_save_status(True)
return self return self
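A hedged sketch of load(), which re-reads the instance's own columns by pk; note the warning above that already loaded related models are not refreshed.

```python
async def load_demo() -> None:
    track = await Track.objects.get(title="Intro (remastered)")

    # imagine the row changing elsewhere (another worker, raw SQL, etc.)
    await Track.objects.filter(pk=track.pk).update(title="Intro (live)")

    await track.load()              # refreshes own columns from the database by pk
    print(track.title)              # -> "Intro (live)"
```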
@ -1,420 +1,15 @@
import inspect from ormar.models.mixins import (
from collections import OrderedDict ExcludableMixin,
from typing import ( MergeModelMixin,
AbstractSet, PrefetchQueryMixin,
Any, SavePrepareMixin,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
TYPE_CHECKING,
Tuple,
Type,
TypeVar,
Union,
) )
from ormar.exceptions import ModelPersistenceError, RelationshipInstanceError
from ormar.queryset.utils import translate_list_to_dict, update
import ormar # noqa: I100 class ModelTableProxy(
from ormar.fields import BaseField, ManyToManyField PrefetchQueryMixin, MergeModelMixin, SavePrepareMixin, ExcludableMixin
from ormar.fields.foreign_key import ForeignKeyField ):
from ormar.models.metaclass import ModelMeta """
Used to combine all mixins with different set of functionalities.
if TYPE_CHECKING: # pragma no cover One of the bases of the ormar Model class.
from ormar import Model """
from ormar.models import NewBaseModel
T = TypeVar("T", bound=Model)
IntStr = Union[int, str]
AbstractSetIntStr = AbstractSet[IntStr]
MappingIntStrAny = Mapping[IntStr, Any]
Field = TypeVar("Field", bound=BaseField)
class ModelTableProxy:
if TYPE_CHECKING: # pragma no cover
Meta: ModelMeta
_related_names: Optional[Set]
_related_names_hash: Union[str, bytes]
pk: Any
get_name: Callable
_props: Set
dict: Callable # noqa: A001, VNE003
def _extract_own_model_fields(self) -> Dict:
related_names = self.extract_related_names()
self_fields = self.dict(exclude=related_names)
return self_fields
@classmethod
def get_related_field_name(cls, target_field: Type["BaseField"]) -> str:
if issubclass(target_field, ormar.fields.ManyToManyField):
return cls.resolve_relation_name(target_field.through, cls)
if target_field.virtual:
return cls.resolve_relation_name(target_field.to, cls)
return target_field.to.Meta.pkname
@staticmethod
def get_clause_target_and_filter_column_name(
parent_model: Type["Model"],
target_model: Type["Model"],
reverse: bool,
related: str,
) -> Tuple[Type["Model"], str]:
if reverse:
field_name = (
parent_model.Meta.model_fields[related].related_name
or parent_model.get_name() + "s"
)
field = target_model.Meta.model_fields[field_name]
if issubclass(field, ormar.fields.ManyToManyField):
sub_field = target_model.resolve_relation_field(
field.through, parent_model
)
return field.through, sub_field.get_alias()
return target_model, field.get_alias()
target_field = target_model.get_column_alias(target_model.Meta.pkname)
return target_model, target_field
@staticmethod
def get_column_name_for_id_extraction(
parent_model: Type["Model"],
target_model: Type["Model"],
reverse: bool,
use_raw: bool,
) -> str:
if reverse:
column_name = parent_model.Meta.pkname
return (
parent_model.get_column_alias(column_name) if use_raw else column_name
)
column = target_model.resolve_relation_field(parent_model, target_model)
return column.get_alias() if use_raw else column.name
@classmethod
def get_filtered_names_to_extract(cls, prefetch_dict: Dict) -> List:
related_to_extract = []
if prefetch_dict and prefetch_dict is not Ellipsis:
related_to_extract = [
related
for related in cls.extract_related_names()
if related in prefetch_dict
]
return related_to_extract
def get_relation_model_id(self, target_field: Type["BaseField"]) -> Optional[int]:
if target_field.virtual or issubclass(
target_field, ormar.fields.ManyToManyField
):
return self.pk
related_name = self.resolve_relation_name(self, target_field.to)
related_model = getattr(self, related_name)
return None if not related_model else related_model.pk
@classmethod
def extract_db_own_fields(cls) -> Set:
related_names = cls.extract_related_names()
self_fields = {
name for name in cls.Meta.model_fields.keys() if name not in related_names
}
return self_fields
@classmethod
def get_names_to_exclude(
cls,
fields: Optional[Union[Dict, Set]] = None,
exclude_fields: Optional[Union[Dict, Set]] = None,
) -> Set:
fields_names = cls.extract_db_own_fields()
if fields and fields is not Ellipsis:
fields_to_keep = {name for name in fields if name in fields_names}
else:
fields_to_keep = fields_names
fields_to_exclude = fields_names - fields_to_keep
if isinstance(exclude_fields, Set):
fields_to_exclude = fields_to_exclude.union(
{name for name in exclude_fields if name in fields_names}
)
elif isinstance(exclude_fields, Dict):
new_to_exclude = {
name
for name in exclude_fields
if name in fields_names and exclude_fields[name] is Ellipsis
}
fields_to_exclude = fields_to_exclude.union(new_to_exclude)
fields_to_exclude = fields_to_exclude - {cls.Meta.pkname}
return fields_to_exclude
@classmethod
def substitute_models_with_pks(cls, model_dict: Dict) -> Dict: # noqa CCR001
for field in cls.extract_related_names():
field_value = model_dict.get(field, None)
if field_value is not None:
target_field = cls.Meta.model_fields[field]
target_pkname = target_field.to.Meta.pkname
if isinstance(field_value, ormar.Model):
pk_value = getattr(field_value, target_pkname)
if not pk_value:
raise ModelPersistenceError(
f"You cannot save {field_value.get_name()} "
f"model without pk set!"
)
model_dict[field] = pk_value
elif field_value: # nested dict
if isinstance(field_value, list):
model_dict[field] = [
target.get(target_pkname) for target in field_value
]
else:
model_dict[field] = field_value.get(target_pkname)
else:
model_dict.pop(field, None)
return model_dict
@classmethod
def populate_default_values(cls, new_kwargs: Dict) -> Dict:
for field_name, field in cls.Meta.model_fields.items():
if (
field_name not in new_kwargs
and field.has_default(use_server=False)
and not field.pydantic_only
):
new_kwargs[field_name] = field.get_default()
# clear fields with server_default set as None
if field.server_default is not None and not new_kwargs.get(field_name):
new_kwargs.pop(field_name, None)
return new_kwargs
@classmethod
def get_column_alias(cls, field_name: str) -> str:
field = cls.Meta.model_fields.get(field_name)
if field is not None and field.alias is not None:
return field.alias
return field_name
@classmethod
def get_column_name_from_alias(cls, alias: str) -> str:
for field_name, field in cls.Meta.model_fields.items():
if field is not None and field.alias == alias:
return field_name
return alias # if not found it's not an alias but actual name
@classmethod
def extract_related_names(cls) -> Set:
if isinstance(cls._related_names, Set):
return cls._related_names
related_names = set()
for name, field in cls.Meta.model_fields.items():
if inspect.isclass(field) and issubclass(field, ForeignKeyField):
related_names.add(name)
cls._related_names = related_names
return related_names
@classmethod
def _extract_db_related_names(cls) -> Set:
related_names = cls.extract_related_names()
related_names = {
name
for name in related_names
if cls.Meta.model_fields[name].is_valid_uni_relation()
}
return related_names
@classmethod
def _exclude_related_names_not_required(cls, nested: bool = False) -> Set:
if nested:
return cls.extract_related_names()
related_names = cls.extract_related_names()
related_names = {
name for name in related_names if cls.Meta.model_fields[name].nullable
}
return related_names
@classmethod
def _update_excluded_with_related_not_required(
cls,
exclude: Union["AbstractSetIntStr", "MappingIntStrAny", None],
nested: bool = False,
) -> Union[Set, Dict]:
exclude = exclude or {}
related_set = cls._exclude_related_names_not_required(nested=nested)
if isinstance(exclude, set):
exclude.union(related_set)
else:
related_dict = translate_list_to_dict(related_set)
exclude = update(related_dict, exclude)
return exclude
def _extract_model_db_fields(self) -> Dict:
self_fields = self._extract_own_model_fields()
self_fields = {
k: v
for k, v in self_fields.items()
if self.get_column_alias(k) in self.Meta.table.columns
}
for field in self._extract_db_related_names():
target_pk_name = self.Meta.model_fields[field].to.Meta.pkname
target_field = getattr(self, field)
self_fields[field] = getattr(target_field, target_pk_name, None)
return self_fields
@staticmethod
def resolve_relation_name( # noqa CCR001
item: Union[
"NewBaseModel",
Type["NewBaseModel"],
"ModelTableProxy",
Type["ModelTableProxy"],
],
related: Union[
"NewBaseModel",
Type["NewBaseModel"],
"ModelTableProxy",
Type["ModelTableProxy"],
],
explicit_multi: bool = False,
) -> str:
for name, field in item.Meta.model_fields.items():
# fastapi is creating clones of response model
# that's why it can be a subclass of the original model
# so we need to compare Meta too as this one is copied as is
if issubclass(field, ManyToManyField):
attrib = "to" if not explicit_multi else "through"
if (
getattr(field, attrib) == related.__class__
or getattr(field, attrib).Meta == related.Meta
):
return name
elif issubclass(field, ForeignKeyField):
if field.to == related.__class__ or field.to.Meta == related.Meta:
return name
raise ValueError(
f"No relation between {item.get_name()} and {related.get_name()}"
) # pragma nocover
@staticmethod
def resolve_relation_field(
item: Union["Model", Type["Model"]], related: Union["Model", Type["Model"]]
) -> Type[BaseField]:
name = ModelTableProxy.resolve_relation_name(item, related)
to_field = item.Meta.model_fields.get(name)
if not to_field: # pragma no cover
raise RelationshipInstanceError(
f"Model {item.__class__} does not have "
f"reference to model {related.__class__}"
)
return to_field
@classmethod
def translate_columns_to_aliases(cls, new_kwargs: Dict) -> Dict:
for field_name, field in cls.Meta.model_fields.items():
if field_name in new_kwargs:
new_kwargs[field.get_alias()] = new_kwargs.pop(field_name)
return new_kwargs
@classmethod
def translate_aliases_to_columns(cls, new_kwargs: Dict) -> Dict:
for field_name, field in cls.Meta.model_fields.items():
if field.alias and field.alias in new_kwargs:
new_kwargs[field_name] = new_kwargs.pop(field.alias)
return new_kwargs
@classmethod
def merge_instances_list(cls, result_rows: Sequence["Model"]) -> Sequence["Model"]:
merged_rows: List["Model"] = []
grouped_instances: OrderedDict = OrderedDict()
for model in result_rows:
grouped_instances.setdefault(model.pk, []).append(model)
for group in grouped_instances.values():
model = group.pop(0)
if group:
for next_model in group:
model = cls.merge_two_instances(next_model, model)
merged_rows.append(model)
return merged_rows
@classmethod
def merge_two_instances(cls, one: "Model", other: "Model") -> "Model":
for field in one.Meta.model_fields.keys():
current_field = getattr(one, field)
if isinstance(current_field, list) and not isinstance(
current_field, ormar.Model
):
setattr(other, field, current_field + getattr(other, field))
elif (
isinstance(current_field, ormar.Model)
and current_field.pk == getattr(other, field).pk
):
setattr(
other,
field,
cls.merge_two_instances(current_field, getattr(other, field)),
)
other.set_save_status(True)
return other
@staticmethod
def _populate_pk_column(
model: Type["Model"], columns: List[str], use_alias: bool = False,
) -> List[str]:
pk_alias = (
model.get_column_alias(model.Meta.pkname)
if use_alias
else model.Meta.pkname
)
if pk_alias not in columns:
columns.append(pk_alias)
return columns
@staticmethod
def own_table_columns(
model: Type["Model"],
fields: Optional[Union[Set, Dict]],
exclude_fields: Optional[Union[Set, Dict]],
use_alias: bool = False,
) -> List[str]:
columns = [
model.get_column_name_from_alias(col.name) if not use_alias else col.name
for col in model.Meta.table.columns
]
field_names = [
model.get_column_name_from_alias(col.name)
for col in model.Meta.table.columns
]
if fields:
columns = [
col
for col, name in zip(columns, field_names)
if model.is_included(fields, name)
]
if exclude_fields:
columns = [
col
for col, name in zip(columns, field_names)
if not model.is_excluded(exclude_fields, name)
]
# always has to return pk column
columns = ModelTableProxy._populate_pk_column(
model=model, columns=columns, use_alias=use_alias
)
return columns
@ -1,4 +1,7 @@
import json try:
import orjson as json
except ImportError: # pragma: no cover
import json # type: ignore
import uuid import uuid
from typing import ( from typing import (
AbstractSet, AbstractSet,
@ -25,8 +28,6 @@ from pydantic import BaseModel
import ormar # noqa I100 import ormar # noqa I100
from ormar.exceptions import ModelError from ormar.exceptions import ModelError
from ormar.fields import BaseField from ormar.fields import BaseField
from ormar.fields.foreign_key import ForeignKeyField
from ormar.models.excludable import Excludable
from ormar.models.metaclass import ModelMeta, ModelMetaclass from ormar.models.metaclass import ModelMeta, ModelMetaclass
from ormar.models.modelproxy import ModelTableProxy from ormar.models.modelproxy import ModelTableProxy
from ormar.queryset.utils import translate_list_to_dict from ormar.queryset.utils import translate_list_to_dict
@ -45,9 +46,16 @@ if TYPE_CHECKING: # pragma no cover
MappingIntStrAny = Mapping[IntStr, Any] MappingIntStrAny = Mapping[IntStr, Any]
class NewBaseModel( class NewBaseModel(pydantic.BaseModel, ModelTableProxy, metaclass=ModelMetaclass):
pydantic.BaseModel, ModelTableProxy, Excludable, metaclass=ModelMetaclass """
): Main base class of ormar Model.
Inherits from pydantic BaseModel and has all mixins combined in ModelTableProxy.
Constructed with ModelMetaclass which in turn also inherits pydantic metaclass.
Abstracts away all internals and helper functions, so the final Model class has only
the logic concerned with database connection and data persistence.
"""
__slots__ = ("_orm_id", "_orm_saved", "_orm", "_pk_column") __slots__ = ("_orm_id", "_orm_saved", "_orm", "_pk_column")
if TYPE_CHECKING: # pragma no cover if TYPE_CHECKING: # pragma no cover
@ -70,20 +78,46 @@ class NewBaseModel(
# noinspection PyMissingConstructor # noinspection PyMissingConstructor
def __init__(self, *args: Any, **kwargs: Any) -> None: # type: ignore def __init__(self, *args: Any, **kwargs: Any) -> None: # type: ignore
"""
Initializer that creates a new ormar Model that is also a pydantic Model at the
same time.
The passed keyword arguments can only be field names and their corresponding values,
as they are passed to pydantic validation, which will complain if extra
params are provided.
If relations are defined, each relation is expanded and the children models are also
initialized and validated. The relation is registered on both sides, so you can
access related models from either end.
Json fields are automatically loaded/dumped if needed.
Models marked as abstract=True in the internal Meta class cannot be initialized.
Also accepts a special __pk_only__ flag that indicates that the Model is constructed
with only the primary key value (no other fields; it is a child model on another
Model). That is the only case when the validation can be skipped.
Also accepts a special __excluded__ parameter that contains a set of fields that
should be explicitly set to None, as otherwise pydantic would try to populate
them with their default values if a default is set.
:raises ModelError: if an abstract model is initialized or an unknown field is passed
:param args: ignored args
:type args: Any
:param kwargs: keyword arguments - all fields values and some special params
:type kwargs: Any
"""
if self.Meta.abstract:
raise ModelError(f"You cannot initialize abstract model {self.get_name()}")
object.__setattr__(self, "_orm_id", uuid.uuid4().hex) object.__setattr__(self, "_orm_id", uuid.uuid4().hex)
object.__setattr__(self, "_orm_saved", False) object.__setattr__(self, "_orm_saved", False)
object.__setattr__(self, "_pk_column", None) object.__setattr__(self, "_pk_column", None)
object.__setattr__( object.__setattr__(
self, self,
"_orm", "_orm",
RelationsManager( RelationsManager(related_fields=self.extract_related_fields(), owner=self,),
related_fields=[
field
for name, field in self.Meta.model_fields.items()
if issubclass(field, ForeignKeyField)
],
owner=self,
),
) )
pk_only = kwargs.pop("__pk_only__", False) pk_only = kwargs.pop("__pk_only__", False)
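A hedged sketch of how the constructor expands relations; passing the related instance is the documented path, while the dict form is an assumption based on the pydantic validation described above.

```python
async def init_demo() -> None:
    album = await Album.objects.create(name="Vice")

    # related model passed as an instance - registered on both sides of the relation
    track = Track(title="Intro", album=album)
    await track.save()

    # the relation given as a dict of the child's fields (assumed to be accepted),
    # which is expanded into a child model during validation
    track2 = Track(title="Outro", album={"id": album.id, "name": album.name})
    await track2.save()
```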
@ -132,6 +166,32 @@ class NewBaseModel(
) )
def __setattr__(self, name: str, value: Any) -> None: # noqa CCR001 def __setattr__(self, name: str, value: Any) -> None: # noqa CCR001
"""
Overwrites setattr in object to allow for special behaviour of certain params.
The "pk" parameter is translated into the actual primary key field name.
Relations are expanded (a child model is constructed if needed) and registered on
both ends of the relation. The related models are handled by the RelationsManager
exposed at the _orm param.
Json fields are converted if needed.
Setting the pk, a foreign key value or any other field value sets the Model save
status to False. Setting a reverse relation or a many to many relation does not, as
it does not modify the state of the model itself (only the related or through model).
To short circuit all checks and expansions, the set of attribute names present
on each model is gathered into _quick_access_fields; that set is checked first, and
if the field is in it, object's setattr is called directly.
:param name: name of the attribute to set
:type name: str
:param value: value of the attribute to set
:type value: Any
:return: None
:rtype: None
"""
if name in object.__getattribute__(self, "_quick_access_fields"): if name in object.__getattribute__(self, "_quick_access_fields"):
object.__setattr__(self, name, value) object.__setattr__(self, name, value)
elif name == "pk": elif name == "pk":
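A small hedged sketch of the save-status rules spelled out above, reusing the illustrative Track model.

```python
async def save_status_demo() -> None:
    track = Track(title="Bridge")
    await track.save()
    assert track.saved is True      # save() sets the status to True

    track.title = "Bridge (edit)"
    assert track.saved is False     # plain field assignment marks the model as modified

    await track.update()
    assert track.saved is True      # update() restores the saved status
```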
@ -158,6 +218,36 @@ class NewBaseModel(
self.set_save_status(False) self.set_save_status(False)
def __getattribute__(self, item: str) -> Any: def __getattribute__(self, item: str) -> Any:
"""
Because ormar needs to overwrite how attributes are retrieved (instead of pydantic),
as well as return related models and not the raw values stored on the model,
__getattribute__ has to be used rather than __getattr__.
It is invoked on every attribute access, so it can be a big overhead; that is why a
number of short circuits is used.
To short circuit all checks and expansions, the set of attribute names present
on each model is gathered into _quick_access_fields; that set is checked first, and
if the attribute is in it, object's __getattribute__ is called directly.
To avoid recursion, object's __getattribute__ is used to actually get the attribute
value from the model after the checks.
Even the internal function calls are constructed with object's functions.
The "pk" parameter is translated into the actual primary key field name.
For relations the actual related model is returned and not the current
model's field value. The related models are handled by the RelationsManager exposed
at the _orm param.
Json fields are converted if needed.
:param item: name of the attribute to retrieve
:type item: str
:return: value of the attribute
:rtype: Any
"""
if item in object.__getattribute__(self, "_quick_access_fields"): if item in object.__getattribute__(self, "_quick_access_fields"):
return object.__getattribute__(self, item) return object.__getattribute__(self, item)
if item == "pk": if item == "pk":
@ -178,16 +268,42 @@ class NewBaseModel(
def _extract_related_model_instead_of_field( def _extract_related_model_instead_of_field(
self, item: str self, item: str
) -> Optional[Union["T", Sequence["T"]]]: ) -> Optional[Union["T", Sequence["T"]]]:
"""
Retrieves the related model/models from RelationshipManager.
:param item: name of the relation
:type item: str
:return: related model, list of related models or None
:rtype: Optional[Union[Model, List[Model]]]
"""
if item in self._orm: if item in self._orm:
return self._orm.get(item) return self._orm.get(item)
return None # pragma no cover return None # pragma no cover
def __eq__(self, other: object) -> bool: def __eq__(self, other: object) -> bool:
"""
Compares another model to this model when == is called.
:param other: other model to compare
:type other: object
:return: result of comparison
:rtype: bool
"""
if isinstance(other, NewBaseModel): if isinstance(other, NewBaseModel):
return self.__same__(other) return self.__same__(other)
return super().__eq__(other) # pragma no cover return super().__eq__(other) # pragma no cover
def __same__(self, other: "NewBaseModel") -> bool: def __same__(self, other: "NewBaseModel") -> bool:
"""
Used by __eq__; compares another model to this model.
Compares:
* _orm_ids,
* primary key values if set,
* dictionary of own fields (excluding relations)
:param other: model to compare to
:type other: NewBaseModel
:return: result of comparison
:rtype: bool
"""
return ( return (
self._orm_id == other._orm_id self._orm_id == other._orm_id
or (self.pk == other.pk and self.pk is not None) or (self.pk == other.pk and self.pk is not None)
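A hedged sketch of the comparison rules: two distinct python objects pointing at the same row compare equal because their primary keys are set and match.

```python
async def eq_demo() -> None:
    first = await Track.objects.get(title="Bridge (edit)")
    second = await Track.objects.get(title="Bridge (edit)")

    assert first is not second      # separate python objects ...
    assert first == second          # ... but equal, since both pks are set and match
```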
@ -197,6 +313,14 @@ class NewBaseModel(
@classmethod @classmethod
def get_name(cls, lower: bool = True) -> str: def get_name(cls, lower: bool = True) -> str:
"""
Returns name of the Model class, by default lowercase.
:param lower: flag if name should be set to lowercase
:type lower: bool
:return: name of the model
:rtype: str
"""
name = cls.__name__ name = cls.__name__
if lower: if lower:
name = name.lower() name = name.lower()
@ -204,6 +328,14 @@ class NewBaseModel(
@property @property
def pk_column(self) -> sqlalchemy.Column: def pk_column(self) -> sqlalchemy.Column:
"""
Retrieves primary key sqlalchemy column from models Meta.table.
Each model has to have primary key.
Only one primary key column is allowed.
:return: primary key sqlalchemy column
:rtype: sqlalchemy.Column
"""
if object.__getattribute__(self, "_pk_column") is not None: if object.__getattribute__(self, "_pk_column") is not None:
return object.__getattribute__(self, "_pk_column") return object.__getattribute__(self, "_pk_column")
pk_columns = self.Meta.table.primary_key.columns.values() pk_columns = self.Meta.table.primary_key.columns.values()
@ -213,30 +345,51 @@ class NewBaseModel(
@property @property
def saved(self) -> bool: def saved(self) -> bool:
"""Saved status of the model. Changed by setattr and loading from db"""
return self._orm_saved return self._orm_saved
@property @property
def signals(self) -> "SignalEmitter": def signals(self) -> "SignalEmitter":
"""Exposes signals from model Meta"""
return self.Meta.signals return self.Meta.signals
@classmethod @classmethod
def pk_type(cls) -> Any: def pk_type(cls) -> Any:
"""Shortcut to models primary key field type"""
return cls.Meta.model_fields[cls.Meta.pkname].__type__ return cls.Meta.model_fields[cls.Meta.pkname].__type__
@classmethod @classmethod
def db_backend_name(cls) -> str: def db_backend_name(cls) -> str:
"""Shortcut to database dialect,
because some dialects require different treatment"""
return cls.Meta.database._backend._dialect.name return cls.Meta.database._backend._dialect.name
def remove(self, name: "T") -> None: def remove(self, parent: "T", name: str) -> None:
self._orm.remove_parent(self, name) """Removes child from relation with given name in RelationshipManager"""
self._orm.remove_parent(self, parent, name)
def set_save_status(self, status: bool) -> None: def set_save_status(self, status: bool) -> None:
"""Sets value of the save status"""
object.__setattr__(self, "_orm_saved", status) object.__setattr__(self, "_orm_saved", status)
@classmethod @classmethod
def get_properties( def get_properties(
cls, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None] cls, include: Union[Set, Dict, None], exclude: Union[Set, Dict, None]
) -> Set[str]: ) -> Set[str]:
"""
Returns a set of names of functions/fields decorated with
@property_field decorator.
Their values are added to the resulting dictionary when dict() is called, and
therefore they are also present in fastapi responses.
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:return: set of property fields names
:rtype: Set[str]
"""
props = cls.Meta.property_fields props = cls.Meta.property_fields
if include: if include:
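A hedged sketch of a @property_field computed field showing up in dict(); the Person model is illustrative and reuses the database/metadata objects from the first sketch.

```python
class Person(ormar.Model):
    class Meta:
        database = database      # from the first sketch above
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    first_name: str = ormar.String(max_length=50)
    last_name: str = ormar.String(max_length=50)

    @ormar.property_field
    def full_name(self) -> str:
        return f"{self.first_name} {self.last_name}"


person = Person(first_name="Ada", last_name="Lovelace")
assert person.dict()["full_name"] == "Ada Lovelace"
```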
@ -248,6 +401,16 @@ class NewBaseModel(
def _get_related_not_excluded_fields( def _get_related_not_excluded_fields(
self, include: Optional[Dict], exclude: Optional[Dict], self, include: Optional[Dict], exclude: Optional[Dict],
) -> List: ) -> List:
"""
Returns related field names with the include and exclude sets applied to them.
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:return: list of relation field names that are not excluded
:rtype: List
"""
fields = [field for field in self.extract_related_names()] fields = [field for field in self.extract_related_names()]
if include: if include:
fields = [field for field in fields if field in include] fields = [field for field in fields if field in include]
@ -265,6 +428,18 @@ class NewBaseModel(
include: Union[Set, Dict, None], include: Union[Set, Dict, None],
exclude: Union[Set, Dict, None], exclude: Union[Set, Dict, None],
) -> List: ) -> List:
"""
Converts list of models into list of dictionaries.
:param models: List of models
:type models: List
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:return: list of models converted to dictionaries
:rtype: List[Dict]
"""
result = [] result = []
for model in models: for model in models:
try: try:
@ -275,11 +450,22 @@ class NewBaseModel(
continue continue
return result return result
@staticmethod
def _skip_ellipsis( def _skip_ellipsis(
items: Union[Set, Dict, None], key: str self, items: Union[Set, Dict, None], key: str
) -> Union[Set, Dict, None]: ) -> Union[Set, Dict, None]:
result = Excludable.get_child(items, key) """
Helper to traverse the include/exclude dictionaries.
In dict() an Ellipsis should be skipped, as it indicates that all fields are required
and is not an actual set/dict of field names.
:param items: current include/exclude value
:type items: Union[Set, Dict, None]
:param key: key for nested relations to check
:type key: str
:return: nested value of the items
:rtype: Union[Set, Dict, None]
"""
result = self.get_child(items, key)
return result if result is not Ellipsis else None return result if result is not Ellipsis else None
def _extract_nested_models( # noqa: CCR001 def _extract_nested_models( # noqa: CCR001
@ -289,6 +475,21 @@ class NewBaseModel(
include: Optional[Dict], include: Optional[Dict],
exclude: Optional[Dict], exclude: Optional[Dict],
) -> Dict: ) -> Dict:
"""
Traverses nested models and converts them into dictionaries.
Calls itself recursively if needed.
:param nested: flag if current instance is nested
:type nested: bool
:param dict_instance: current instance dict
:type dict_instance: Dict
:param include: fields to include
:type include: Optional[Dict]
:param exclude: fields to exclude
:type exclude: Optional[Dict]
:return: current model dict with child models converted to dictionaries
:rtype: Dict
"""
fields = self._get_related_not_excluded_fields(include=include, exclude=exclude) fields = self._get_related_not_excluded_fields(include=include, exclude=exclude)
@ -324,6 +525,34 @@ class NewBaseModel(
exclude_none: bool = False, exclude_none: bool = False,
nested: bool = False, nested: bool = False,
) -> "DictStrAny": # noqa: A003' ) -> "DictStrAny": # noqa: A003'
"""
Generate a dictionary representation of the model,
optionally specifying which fields to include or exclude.
Nested models are also parsed to dictionaries.
Additionally fields decorated with @property_field are also added.
:param include: fields to include
:type include: Union[Set, Dict, None]
:param exclude: fields to exclude
:type exclude: Union[Set, Dict, None]
:param by_alias: flag to get values by alias - passed to pydantic
:type by_alias: bool
:param skip_defaults: flag to not set values - passed to pydantic
:type skip_defaults: bool
:param exclude_unset: flag to exclude not set values - passed to pydantic
:type exclude_unset: bool
:param exclude_defaults: flag to exclude default values - passed to pydantic
:type exclude_defaults: bool
:param exclude_none: flag to exclude None values - passed to pydantic
:type exclude_none: bool
:param nested: flag if the current model is nested
:type nested: bool
:return: dictionary of the model's fields, with nested models converted to dictionaries
:rtype: Dict
"""
dict_instance = super().dict( dict_instance = super().dict(
include=include, include=include,
exclude=self._update_excluded_with_related_not_required(exclude, nested), exclude=self._update_excluded_with_related_not_required(exclude, nested),
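A hedged sketch of dict() with include/exclude sets; "tracks" is the default reverse relation name assumed from the ForeignKey in the first sketch.

```python
async def dict_demo() -> None:
    album = await Album.objects.select_related("tracks").get(name="Vice (Deluxe)")

    full = album.dict()                          # nested tracks become dictionaries too
    no_tracks = album.dict(exclude={"tracks"})   # drop the whole reverse relation
    name_only = album.dict(include={"name"})     # keep only the chosen own fields
    print(full, no_tracks, name_only)
```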
@ -354,12 +583,32 @@ class NewBaseModel(
return dict_instance return dict_instance
def from_dict(self, value_dict: Dict) -> "NewBaseModel": def update_from_dict(self, value_dict: Dict) -> "NewBaseModel":
"""
Updates self with values of fields passed in the dictionary.
:param value_dict: dictionary of fields names and values
:type value_dict: Dict
:return: self
:rtype: NewBaseModel
"""
for key, value in value_dict.items(): for key, value in value_dict.items():
setattr(self, key, value) setattr(self, key, value)
return self return self
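This hunk renames from_dict() to update_from_dict(); a hedged usage sketch with the new name, reusing the illustrative Track model:

```python
async def update_from_dict_demo() -> None:
    track = await Track.objects.get(title="Intro")

    # mutates only the python object; persist the change explicitly afterwards
    track.update_from_dict({"title": "Intro (remastered)"})
    await track.update()
```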
def _convert_json(self, column_name: str, value: Any, op: str) -> Union[str, Dict]: def _convert_json(self, column_name: str, value: Any, op: str) -> Union[str, Dict]:
"""
Converts value to/from json if needed (for Json columns).
:param column_name: name of the field
:type column_name: str
:param value: value of the field
:type value: Any
:param op: operator on json
:type op: str
:return: converted value if needed, else original value
:rtype: Any
"""
if not self._is_conversion_to_json_needed(column_name): if not self._is_conversion_to_json_needed(column_name):
return value return value
@ -372,13 +621,72 @@ class NewBaseModel(
if condition: if condition:
try: try:
return operand(value) value = operand(value)
except TypeError: # pragma no cover except TypeError: # pragma no cover
pass pass
return value return value.decode("utf-8") if isinstance(value, bytes) else value
def _is_conversion_to_json_needed(self, column_name: str) -> bool: def _is_conversion_to_json_needed(self, column_name: str) -> bool:
"""
Checks if given column name is related to JSON field.
:param column_name: name of the field
:type column_name: str
:return: result of the check
:rtype: bool
"""
return ( return (
column_name in self.Meta.model_fields column_name in self.Meta.model_fields
and self.Meta.model_fields[column_name].__type__ == pydantic.Json and self.Meta.model_fields[column_name].__type__ == pydantic.Json
) )
def _extract_own_model_fields(self) -> Dict:
"""
Returns a dictionary with field names and values for fields that are not
relation fields (ForeignKey, ManyToMany etc.).
:return: dictionary of field names and values.
:rtype: Dict
"""
related_names = self.extract_related_names()
self_fields = self.dict(exclude=related_names)
return self_fields
def _extract_model_db_fields(self) -> Dict:
"""
Returns a dictionary with field names and values for fields that are stored in
the current model's table.
That includes own non-relational fields and foreign key fields.
:return: dictionary of field names and values.
:rtype: Dict
"""
self_fields = self._extract_own_model_fields()
self_fields = {
k: v
for k, v in self_fields.items()
if self.get_column_alias(k) in self.Meta.table.columns
}
for field in self._extract_db_related_names():
target_pk_name = self.Meta.model_fields[field].to.Meta.pkname
target_field = getattr(self, field)
self_fields[field] = getattr(target_field, target_pk_name, None)
return self_fields
def get_relation_model_id(self, target_field: Type["BaseField"]) -> Optional[int]:
"""
Returns an id of the relation side model to use in prefetch query.
:param target_field: field with relation definition
:type target_field: Type["BaseField"]
:return: value of pk if set
:rtype: Optional[int]
"""
if target_field.virtual or issubclass(
target_field, ormar.fields.ManyToManyField
):
return self.pk
related_name = target_field.name
related_model = getattr(self, related_name)
return None if not related_model else related_model.pk
@ -1,3 +1,7 @@
"""
Contains a set of field/method names that are used to bypass the checks in
NewBaseModel __getattribute__ calls in order to speed up those calls.
"""
quick_access_set = { quick_access_set = {
"Config", "Config",
"Meta", "Meta",
@ -33,7 +37,7 @@ quick_access_set = {
"delete", "delete",
"dict", "dict",
"extract_related_names", "extract_related_names",
"from_dict", "update_from_dict",
"get_column_alias", "get_column_alias",
"get_column_name_from_alias", "get_column_name_from_alias",
"get_filtered_names_to_extract", "get_filtered_names_to_extract",
@ -1,3 +1,6 @@
"""
Contains QuerySet and different Query classes to allow for the construction of sql queries.
"""
from ormar.queryset.filter_query import FilterQuery from ormar.queryset.filter_query import FilterQuery
from ormar.queryset.limit_query import LimitQuery from ormar.queryset.limit_query import LimitQuery
from ormar.queryset.offset_query import OffsetQuery from ormar.queryset.offset_query import OffsetQuery
@ -29,6 +29,10 @@ ESCAPE_CHARACTERS = ["%", "_"]
class QueryClause: class QueryClause:
"""
Constructs where clauses from strings passed as arguments
"""
def __init__( def __init__(
self, model_cls: Type["Model"], filter_clauses: List, select_related: List, self, model_cls: Type["Model"], filter_clauses: List, select_related: List,
) -> None: ) -> None:
@ -42,7 +46,16 @@ class QueryClause:
def filter( # noqa: A003 def filter( # noqa: A003
self, **kwargs: Any self, **kwargs: Any
) -> Tuple[List[sqlalchemy.sql.expression.TextClause], List[str]]: ) -> Tuple[List[sqlalchemy.sql.expression.TextClause], List[str]]:
"""
Main external access point that processes the clauses into sqlalchemy text
clauses and updates the select_related list with implicitly related tables
mentioned in filter strings but not included in select_related.
:param kwargs: key, value pair with column names and values
:type kwargs: Any
:return: Tuple with list of where clauses and updated select_related list
:rtype: Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]]
"""
if kwargs.get("pk"): if kwargs.get("pk"):
pk_name = self.model_cls.get_column_alias(self.model_cls.Meta.pkname) pk_name = self.model_cls.get_column_alias(self.model_cls.Meta.pkname)
kwargs[pk_name] = kwargs.pop("pk") kwargs[pk_name] = kwargs.pop("pk")
@ -54,6 +67,16 @@ class QueryClause:
def _populate_filter_clauses( def _populate_filter_clauses(
self, **kwargs: Any self, **kwargs: Any
) -> Tuple[List[sqlalchemy.sql.expression.TextClause], List[str]]: ) -> Tuple[List[sqlalchemy.sql.expression.TextClause], List[str]]:
"""
Iterates all clauses and extracts used operator and field from related
models if needed. Based on the chain of related names the target table
is determined and the final clause is escaped if needed and compiled.
:param kwargs: key, value pair with column names and values
:type kwargs: Any
:return: Tuple with list of where clauses and updated select_related list
:rtype: Tuple[List[sqlalchemy.sql.elements.TextClause], List[str]]
"""
filter_clauses = self.filter_clauses filter_clauses = self.filter_clauses
select_related = list(self._select_related) select_related = list(self._select_related)
@ -100,6 +123,24 @@ class QueryClause:
table: sqlalchemy.Table, table: sqlalchemy.Table,
table_prefix: str, table_prefix: str,
) -> sqlalchemy.sql.expression.TextClause: ) -> sqlalchemy.sql.expression.TextClause:
"""
Escapes characters if required.
If the value is an ormar Model, substitutes it with its pk value.
Compiles the clause.
:param value: value of the filter
:type value: Any
:param op: filter operator
:type op: str
:param column: column on which filter should be applied
:type column: sqlalchemy.sql.schema.Column
:param table: table on which filter should be applied
:type table: sqlalchemy.sql.schema.Table
:param table_prefix: prefix from AliasManager
:type table_prefix: str
:return: compiled and escaped clause
:rtype: sqlalchemy.sql.elements.TextClause
"""
value, has_escaped_character = self._escape_characters_in_clause(op, value) value, has_escaped_character = self._escape_characters_in_clause(op, value)
if isinstance(value, ormar.Model): if isinstance(value, ormar.Model):
@ -119,7 +160,21 @@ class QueryClause:
def _determine_filter_target_table( def _determine_filter_target_table(
self, related_parts: List[str], select_related: List[str] self, related_parts: List[str], select_related: List[str]
) -> Tuple[List[str], str, Type["Model"]]: ) -> Tuple[List[str], str, Type["Model"]]:
"""
Adds related strings to the select_related list, as otherwise the clause would fail
because the required columns would not be present. That means that the select_related
list is filled with missing values that are present in filters.
Walks the relation to retrieve the actual model on which the clause should be
constructed, extracts alias based on last relation leading to target model.
:param related_parts: list of split parts of related string
:type related_parts: List[str]
:param select_related: list of related models
:type select_related: List[str]
:return: list of related models, table_prefix, final model class
:rtype: Tuple[List[str], str, Type[Model]]
"""
table_prefix = "" table_prefix = ""
model_cls = self.model_cls model_cls = self.model_cls
select_related = [relation for relation in select_related] select_related = [relation for relation in select_related]
@ -137,11 +192,9 @@ class QueryClause:
if issubclass(model_cls.Meta.model_fields[part], ManyToManyField): if issubclass(model_cls.Meta.model_fields[part], ManyToManyField):
through_field = model_cls.Meta.model_fields[part] through_field = model_cls.Meta.model_fields[part]
previous_model = through_field.through previous_model = through_field.through
part2 = model_cls.resolve_relation_name( part2 = through_field.default_target_field_name() # type: ignore
through_field.through, through_field.to, explicit_multi=True
)
manager = model_cls.Meta.alias_manager manager = model_cls.Meta.alias_manager
table_prefix = manager.resolve_relation_join_new(previous_model, part2) table_prefix = manager.resolve_relation_alias(previous_model, part2)
model_cls = model_cls.Meta.model_fields[part].to model_cls = model_cls.Meta.model_fields[part].to
previous_model = model_cls previous_model = model_cls
return select_related, table_prefix, model_cls return select_related, table_prefix, model_cls
@ -154,6 +207,23 @@ class QueryClause:
table_prefix: str, table_prefix: str,
modifiers: Dict, modifiers: Dict,
) -> sqlalchemy.sql.expression.TextClause: ) -> sqlalchemy.sql.expression.TextClause:
"""
Compiles the clause to str using the appropriate database dialect, replaces column
names with aliased names and converts it back to TextClause.
:param clause: original not compiled clause
:type clause: sqlalchemy.sql.elements.BinaryExpression
:param column: column on which filter should be applied
:type column: sqlalchemy.sql.schema.Column
:param table: table on which filter should be applied
:type table: sqlalchemy.sql.schema.Table
:param table_prefix: prefix from AliasManager
:type table_prefix: str
:param modifiers: sqlalchemy modifiers - used only to escape chars here
:type modifiers: Dict[str, NoneType]
:return: compiled and escaped clause
:rtype: sqlalchemy.sql.elements.TextClause
"""
for modifier, modifier_value in modifiers.items(): for modifier, modifier_value in modifiers.items():
clause.modifiers[modifier] = modifier_value clause.modifiers[modifier] = modifier_value
@ -171,6 +241,19 @@ class QueryClause:
@staticmethod @staticmethod
def _escape_characters_in_clause(op: str, value: Any) -> Tuple[Any, bool]: def _escape_characters_in_clause(op: str, value: Any) -> Tuple[Any, bool]:
"""
Escapes the special characters ["%", "_"] if needed.
Adds `%` for `like` queries.
:raises QueryDefinitionError: if contains or icontains is used with
ormar model instance
:param op: operator used in query
:type op: str
:param value: value of the filter
:type value: Any
:return: escaped value and flag if escaping is needed
:rtype: Tuple[Any, bool]
"""
has_escaped_character = False has_escaped_character = False
if op not in [ if op not in [
@ -204,6 +287,14 @@ class QueryClause:
def _extract_operator_field_and_related( def _extract_operator_field_and_related(
parts: List[str], parts: List[str],
) -> Tuple[str, str, Optional[List]]: ) -> Tuple[str, str, Optional[List]]:
"""
Splits filter query key and extracts required parts.
:param parts: split filter query key
:type parts: List[str]
:return: operator, field_name, list of related parts
:rtype: Tuple[str, str, Optional[List]]
"""
if parts[-1] in FILTER_OPERATORS: if parts[-1] in FILTER_OPERATORS:
op = parts[-1] op = parts[-1]
field_name = parts[-2] field_name = parts[-2]
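A hedged sketch of the escaping and validation rules above: literal % and _ in contains filters are escaped before building the LIKE pattern, while passing a model instance to contains raises QueryDefinitionError.

```python
from ormar.exceptions import QueryDefinitionError


async def escaping_demo() -> None:
    # '%' is treated literally - it is escaped before being wrapped into LIKE
    fifty = await Track.objects.filter(title__contains="50%").all()
    print(len(fifty))

    album = await Album.objects.get(name="Vice (Deluxe)")
    try:
        await Track.objects.filter(album__contains=album).all()
    except QueryDefinitionError:
        # contains/icontains with an ormar Model instance is rejected
        pass
```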
@ -4,11 +4,23 @@ import sqlalchemy
class FilterQuery: class FilterQuery:
"""
Modifies the select query with given list of where/filter clauses.
"""
def __init__(self, filter_clauses: List, exclude: bool = False) -> None: def __init__(self, filter_clauses: List, exclude: bool = False) -> None:
self.exclude = exclude self.exclude = exclude
self.filter_clauses = filter_clauses self.filter_clauses = filter_clauses
def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select: def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select:
"""
Applies all filter clauses if set.
:param expr: query to modify
:type expr: sqlalchemy.sql.selectable.Select
:return: modified query
:rtype: sqlalchemy.sql.selectable.Select
"""
if self.filter_clauses: if self.filter_clauses:
if len(self.filter_clauses) == 1: if len(self.filter_clauses) == 1:
clause = self.filter_clauses[0] clause = self.filter_clauses[0]
@ -22,6 +22,10 @@ if TYPE_CHECKING: # pragma no cover
class JoinParameters(NamedTuple): class JoinParameters(NamedTuple):
"""
Named tuple that holds set of parameters passed during join construction.
"""
prev_model: Type["Model"] prev_model: Type["Model"]
previous_alias: str previous_alias: str
from_table: str from_table: str
@ -48,13 +52,36 @@ class SqlJoin:
self.sorted_orders = sorted_orders self.sorted_orders = sorted_orders
@staticmethod @staticmethod
def relation_manager(model_cls: Type["Model"]) -> AliasManager: def alias_manager(model_cls: Type["Model"]) -> AliasManager:
"""
Shortcut to the ormar model's AliasManager stored on Meta.
:param model_cls: ormar Model class
:type model_cls: Type[Model]
:return: alias manager from model's Meta
:rtype: AliasManager
"""
return model_cls.Meta.alias_manager return model_cls.Meta.alias_manager
@staticmethod @staticmethod
def on_clause( def on_clause(
previous_alias: str, alias: str, from_clause: str, to_clause: str, previous_alias: str, alias: str, from_clause: str, to_clause: str,
) -> text: ) -> text:
"""
Receives aliases and names of both ends of the join and combines them
into one text clause used in joins.
:param previous_alias: alias of previous table
:type previous_alias: str
:param alias: alias of current table
:type alias: str
:param from_clause: from table name
:type from_clause: str
:param to_clause: to table name
:type to_clause: str
:return: clause combining all strings
:rtype: sqlalchemy.text
"""
left_part = f"{alias}_{to_clause}" left_part = f"{alias}_{to_clause}"
right_part = f"{previous_alias + '_' if previous_alias else ''}{from_clause}" right_part = f"{previous_alias + '_' if previous_alias else ''}{from_clause}"
return text(f"{left_part}={right_part}") return text(f"{left_part}={right_part}")
@ -66,6 +93,20 @@ class SqlJoin:
exclude_fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]],
nested_name: str, nested_name: str,
) -> Tuple[Optional[Union[Dict, Set]], Optional[Union[Dict, Set]]]: ) -> Tuple[Optional[Union[Dict, Set]], Optional[Union[Dict, Set]]]:
"""
Extracts nested fields and exclude_fields if applicable.
:param model_cls: ormar model class
:type model_cls: Type["Model"]
:param fields: fields to include
:type fields: Optional[Union[Set, Dict]]
:param exclude_fields: fields to exclude
:type exclude_fields: Optional[Union[Set, Dict]]
:param nested_name: name of the nested field
:type nested_name: str
:return: updated exclude and include fields from nested objects
:rtype: Tuple[Optional[Union[Dict, Set]], Optional[Union[Dict, Set]]]
"""
fields = model_cls.get_included(fields, nested_name) fields = model_cls.get_included(fields, nested_name)
exclude_fields = model_cls.get_excluded(exclude_fields, nested_name) exclude_fields = model_cls.get_excluded(exclude_fields, nested_name)
return fields, exclude_fields return fields, exclude_fields
@ -73,7 +114,19 @@ class SqlJoin:
def build_join( # noqa: CCR001 def build_join( # noqa: CCR001
self, item: str, join_parameters: JoinParameters self, item: str, join_parameters: JoinParameters
) -> Tuple[List, sqlalchemy.sql.select, List, OrderedDict]: ) -> Tuple[List, sqlalchemy.sql.select, List, OrderedDict]:
"""
Main external access point for building a join.
Splits the join definition, updates fields and exclude_fields if needed,
handles switching to through models for m2m relations, returns updated lists of
used_aliases and sort_orders.
:param item: string with join definition
:type item: str
:param join_parameters: parameters from previous/ current join
:type join_parameters: JoinParameters
:return: list of used aliases, select from, list of aliased columns, sort orders
:rtype: Tuple[List[str], Join, List[TextClause], collections.OrderedDict]
"""
fields = self.fields fields = self.fields
exclude_fields = self.exclude_fields exclude_fields = self.exclude_fields
@ -100,6 +153,7 @@ class SqlJoin:
exclude_fields=exclude_fields, exclude_fields=exclude_fields,
) )
part = new_part part = new_part
if index > 0: # nested joins if index > 0: # nested joins
fields, exclude_fields = SqlJoin.update_inclusions( fields, exclude_fields = SqlJoin.update_inclusions(
model_cls=join_parameters.model_cls, model_cls=join_parameters.model_cls,
@ -129,13 +183,30 @@ class SqlJoin:
exclude_fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]],
is_multi: bool = False, is_multi: bool = False,
) -> JoinParameters: ) -> JoinParameters:
"""
Updates used_aliases to not join multiple times to the same table.
Updates join parameters with new values.
:param part: part of the join str definition
:type part: str
:param join_params: parameters from previous/ current join
:type join_params: JoinParameters
:param fields: fields to include
:type fields: Optional[Union[Set, Dict]]
:param exclude_fields: fields to exclude
:type exclude_fields: Optional[Union[Set, Dict]]
:param is_multi: flag if the relation is m2m
:type is_multi: bool
:return: updated join parameters
:rtype: ormar.queryset.join.JoinParameters
"""
if is_multi: if is_multi:
model_cls = join_params.model_cls.Meta.model_fields[part].through model_cls = join_params.model_cls.Meta.model_fields[part].through
else: else:
model_cls = join_params.model_cls.Meta.model_fields[part].to model_cls = join_params.model_cls.Meta.model_fields[part].to
to_table = model_cls.Meta.table.name to_table = model_cls.Meta.table.name
alias = model_cls.Meta.alias_manager.resolve_relation_join_new( alias = model_cls.Meta.alias_manager.resolve_relation_alias(
join_params.prev_model, part join_params.prev_model, part
) )
if alias not in self.used_aliases: if alias not in self.used_aliases:
@ -164,6 +235,34 @@ class SqlJoin:
fields: Optional[Union[Set, Dict]], fields: Optional[Union[Set, Dict]],
exclude_fields: Optional[Union[Set, Dict]], exclude_fields: Optional[Union[Set, Dict]],
) -> None: ) -> None:
"""
Resolves to and from column names and table names.
Produces on_clause.
Performs actual join updating select_from parameter.
Adds aliased names of the required columns to the list of columns to include in the query.
Updates the used aliases list directly.
Processes order_by clauses for non m2m relations.
:param join_params: parameters from previous/ current join
:type join_params: JoinParameters
:param is_multi: flag if it's m2m relation
:type is_multi: bool
:param model_cls: ormar model class of the joined table
:type model_cls: ormar.models.metaclass.ModelMetaclass
:param part: name of the field used in join
:type part: str
:param alias: alias of the current join
:type alias: str
:param fields: fields to include
:type fields: Optional[Union[Set, Dict]]
:param exclude_fields: fields to exclude
:type exclude_fields: Optional[Union[Set, Dict]]
"""
to_table = model_cls.Meta.table.name to_table = model_cls.Meta.table.name
to_key, from_key = self.get_to_and_from_keys( to_key, from_key = self.get_to_and_from_keys(
join_params, is_multi, model_cls, part join_params, is_multi, model_cls, part
@ -175,7 +274,7 @@ class SqlJoin:
from_clause=f"{join_params.from_table}.{from_key}", from_clause=f"{join_params.from_table}.{from_key}",
to_clause=f"{to_table}.{to_key}", to_clause=f"{to_table}.{to_key}",
) )
target_table = self.relation_manager(model_cls).prefixed_table_name( target_table = self.alias_manager(model_cls).prefixed_table_name(
alias, to_table alias, to_table
) )
self.select_from = sqlalchemy.sql.outerjoin( self.select_from = sqlalchemy.sql.outerjoin(
@ -199,13 +298,21 @@ class SqlJoin:
use_alias=True, use_alias=True,
) )
self.columns.extend( self.columns.extend(
self.relation_manager(model_cls).prefixed_columns( self.alias_manager(model_cls).prefixed_columns(
alias, model_cls.Meta.table, self_related_fields alias, model_cls.Meta.table, self_related_fields
) )
) )
self.used_aliases.append(alias) self.used_aliases.append(alias)
def _switch_many_to_many_order_columns(self, part: str, new_part: str) -> None: def _switch_many_to_many_order_columns(self, part: str, new_part: str) -> None:
"""
Substitutes the name of the relation with actual model name in m2m order bys.
:param part: name of the field with relation
:type part: str
:param new_part: name of the target model
:type new_part: str
"""
if self.order_columns: if self.order_columns:
split_order_columns = [ split_order_columns = [
x.split("__") for x in self.order_columns if "__" in x x.split("__") for x in self.order_columns if "__" in x
@ -219,6 +326,16 @@ class SqlJoin:
@staticmethod @staticmethod
def _check_if_condition_apply(condition: List, part: str) -> bool: def _check_if_condition_apply(condition: List, part: str) -> bool:
"""
Checks filter conditions to find if they apply to current join.
:param condition: list of parts of condition split by '__'
:type condition: List[str]
:param part: name of the current relation join.
:type part: str
:return: result of the check
:rtype: bool
"""
return len(condition) >= 2 and ( return len(condition) >= 2 and (
condition[-2] == part or condition[-2][1:] == part condition[-2] == part or condition[-2][1:] == part
) )
@ -226,6 +343,19 @@ class SqlJoin:
def set_aliased_order_by( def set_aliased_order_by(
self, condition: List[str], alias: str, to_table: str, model_cls: Type["Model"], self, condition: List[str], alias: str, to_table: str, model_cls: Type["Model"],
) -> None: ) -> None:
"""
Substitutes a leading hyphen ('-') with descending order.
Constructs the actual sqlalchemy text clause using the aliased table and column name.
:param condition: list of parts of a current condition split by '__'
:type condition: List[str]
:param alias: alias of the table in current join
:type alias: str
:param to_table: target table
:type to_table: sqlalchemy.sql.elements.quoted_name
:param model_cls: ormar model class
:type model_cls: ormar.models.metaclass.ModelMetaclass
"""
direction = f"{'desc' if condition[0][0] == '-' else ''}" direction = f"{'desc' if condition[0][0] == '-' else ''}"
column_alias = model_cls.get_column_alias(condition[-1]) column_alias = model_cls.get_column_alias(condition[-1])
order = text(f"{alias}_{to_table}.{column_alias} {direction}") order = text(f"{alias}_{to_table}.{column_alias} {direction}")
@ -239,21 +369,42 @@ class SqlJoin:
part: str, part: str,
model_cls: Type["Model"], model_cls: Type["Model"],
) -> None: ) -> None:
"""
Triggers construction of order bys if they are given.
Otherwise by default each table is sorted by a primary key column asc.
:param alias: alias of current table in join
:type alias: str
:param to_table: target table
:type to_table: sqlalchemy.sql.elements.quoted_name
:param pkname_alias: alias of the primary key column
:type pkname_alias: str
:param part: name of the current relation join
:type part: str
:param model_cls: ormar model class
:type model_cls: Type[Model]
"""
if self.order_columns: if self.order_columns:
current_table_sorted = False
split_order_columns = [ split_order_columns = [
x.split("__") for x in self.order_columns if "__" in x x.split("__") for x in self.order_columns if "__" in x
] ]
for condition in split_order_columns: for condition in split_order_columns:
if self._check_if_condition_apply(condition, part): if self._check_if_condition_apply(condition, part):
current_table_sorted = True
self.set_aliased_order_by( self.set_aliased_order_by(
condition=condition, condition=condition,
alias=alias, alias=alias,
to_table=to_table, to_table=to_table,
model_cls=model_cls, model_cls=model_cls,
) )
if not current_table_sorted:
order = text(f"{alias}_{to_table}.{pkname_alias}")
self.sorted_orders[f"{alias}.{pkname_alias}"] = order
else: else:
order = text(f"{alias}_{to_table}.{pkname_alias}") order = text(f"{alias}_{to_table}.{pkname_alias}")
self.sorted_orders[f"{to_table}.{pkname_alias}"] = order self.sorted_orders[f"{alias}.{pkname_alias}"] = order
@staticmethod @staticmethod
def get_to_and_from_keys( def get_to_and_from_keys(
@ -262,6 +413,22 @@ class SqlJoin:
model_cls: Type["Model"], model_cls: Type["Model"],
part: str, part: str,
) -> Tuple[str, str]: ) -> Tuple[str, str]:
"""
Based on the relation type, the name of the relation, and the previous models and parts
stored in JoinParameters, resolves the current to and from keys, which are
different for ManyToMany relations, ForeignKeys and the reverse part of relations.
:param join_params: parameters from previous/ current join
:type join_params: JoinParameters
:param is_multi: flag if the relation is of m2m type
:type is_multi: bool
:param model_cls: ormar model class
:type model_cls: Type[Model]
:param part: name of the current relation join
:type part: str
:return: to key and from key
:rtype: Tuple[str, str]
"""
if is_multi:
to_field = join_params.prev_model.get_name()
to_key = model_cls.get_column_alias(to_field)

View File

@ -4,10 +4,22 @@ import sqlalchemy
class LimitQuery:
"""
Modifies the select query with limit clause.
"""
def __init__(self, limit_count: Optional[int]) -> None:
self.limit_count = limit_count
def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select:
"""
Applies the limit clause.
:param expr: query to modify
:type expr: sqlalchemy.sql.selectable.Select
:return: modified query
:rtype: sqlalchemy.sql.selectable.Select
"""
if self.limit_count:
expr = expr.limit(self.limit_count)
return expr

View File

@ -4,10 +4,22 @@ import sqlalchemy
class OffsetQuery:
"""
Modifies the select query with offset if set
"""
def __init__(self, query_offset: Optional[int]) -> None:
self.query_offset = query_offset
def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select:
"""
Applies the offset clause.
:param expr: query to modify
:type expr: sqlalchemy.sql.selectable.Select
:return: modified query
:rtype: sqlalchemy.sql.selectable.Select
"""
if self.query_offset:
expr = expr.offset(self.query_offset)
return expr

View File

@ -4,10 +4,22 @@ import sqlalchemy
class OrderQuery:
"""
Modifies the select query with given list of order_by clauses.
"""
def __init__(self, sorted_orders: Dict) -> None:
self.sorted_orders = sorted_orders
def apply(self, expr: sqlalchemy.sql.select) -> sqlalchemy.sql.select:
"""
Applies all order_by clauses if set.
:param expr: query to modify
:type expr: sqlalchemy.sql.selectable.Select
:return: modified query
:rtype: sqlalchemy.sql.selectable.Select
"""
if self.sorted_orders:
for order in list(self.sorted_orders.values()):
if order is not None:

View File

@ -24,6 +24,18 @@ if TYPE_CHECKING: # pragma: no cover
def add_relation_field_to_fields( def add_relation_field_to_fields(
fields: Union[Set[Any], Dict[Any, Any], None], related_field_name: str fields: Union[Set[Any], Dict[Any, Any], None], related_field_name: str
) -> Union[Set[Any], Dict[Any, Any], None]: ) -> Union[Set[Any], Dict[Any, Any], None]:
"""
Adds related field into fields to include as otherwise it would be skipped.
Related field is added only if fields are already populated.
Empty fields implies all fields.
:param fields: fields to include
:type fields: Union[Set[Any], Dict[Any, Any], None]
:param related_field_name: name of the field with relation
:type related_field_name: str
:return: updated fields dict
:rtype: Union[Set[Any], Dict[Any, Any], None]
"""
if fields and related_field_name not in fields: if fields and related_field_name not in fields:
if isinstance(fields, dict): if isinstance(fields, dict):
fields[related_field_name] = ... fields[related_field_name] = ...
@ -33,6 +45,18 @@ def add_relation_field_to_fields(
def sort_models(models: List["Model"], orders_by: Dict) -> List["Model"]: def sort_models(models: List["Model"], orders_by: Dict) -> List["Model"]:
"""
Since the prefetch query gets all related models by ids, the sorting needs to happen
in python. By default models are already sorted by id, so here we re-sort only if
the order_by parameter was set.
:param models: list of models already fetched from db
:type models: List[Model]
:param orders_by: order by dictionary
:type orders_by: Dict[str, str]
:return: sorted list of models
:rtype: List[Model]
"""
sort_criteria = [ sort_criteria = [
(key, value) for key, value in orders_by.items() if isinstance(value, str) (key, value) for key, value in orders_by.items() if isinstance(value, str)
] ]
@ -54,6 +78,29 @@ def set_children_on_model( # noqa: CCR001
models: Dict, models: Dict,
orders_by: Dict, orders_by: Dict,
) -> None: ) -> None:
"""
Extract ids of child models by given relation id key value.
Based on those ids the actual children model instances are fetched from
already fetched data.
If needed the child models are resorted according to passed orders_by dict.
The relation is also registered, as each child is set as the value of the parent's related field.
:param model: parent model instance
:type model: Model
:param related: name of the related field
:type related: str
:param children: dictionary of children ids/ related field value
:type children: Dict[int, set]
:param model_id: id of the model on which children should be set
:type model_id: int
:param models: dictionary of child models instances
:type models: Dict
:param orders_by: order_by dictionary
:type orders_by: Dict
"""
for key, child_models in children.items(): for key, child_models in children.items():
if key == model_id: if key == model_id:
models_to_set = [models[child] for child in sorted(child_models)] models_to_set = [models[child] for child in sorted(child_models)]
@ -67,6 +114,12 @@ def set_children_on_model( # noqa: CCR001
class PrefetchQuery: class PrefetchQuery:
"""
Query used to fetch related models in subsequent queries.
Each model is fetched only once per name of the relation.
That means that for each prefetch_related entry a separate query is issued to the database.
"""
def __init__( # noqa: CFQ002 def __init__( # noqa: CFQ002
self, self,
model_cls: Type["Model"], model_cls: Type["Model"],
@ -92,6 +145,22 @@ class PrefetchQuery:
async def prefetch_related( async def prefetch_related(
self, models: Sequence["Model"], rows: List self, models: Sequence["Model"], rows: List
) -> Sequence["Model"]: ) -> Sequence["Model"]:
"""
Main entry point for prefetch_query.
Receives list of already initialized parent models with all children from
select_related already populated. Also receives the list of raw sql result rows,
as it's quicker to extract ids that way than by calling each model.
Returns list with related models already prefetched and set.
:param models: list of already instantiated models from main query
:type models: List[Model]
:param rows: row sql result of the main query before the prefetch
:type rows: List[sqlalchemy.engine.result.RowProxy]
:return: list of models with children prefetched
:rtype: List[Model]
"""
self.models = extract_models_to_dict_of_lists( self.models = extract_models_to_dict_of_lists(
model_type=self.model, models=models, select_dict=self.select_dict model_type=self.model, models=models, select_dict=self.select_dict
) )
@ -101,6 +170,17 @@ class PrefetchQuery:
def _extract_ids_from_raw_data( def _extract_ids_from_raw_data(
self, parent_model: Type["Model"], column_name: str self, parent_model: Type["Model"], column_name: str
) -> Set: ) -> Set:
"""
Iterates over raw rows and extract id values of relation columns by using
prefixed column name.
:param parent_model: ormar model class
:type parent_model: Type[Model]
:param column_name: name of the relation column which is a key column
:type column_name: str
:return: set of ids of related model that should be extracted
:rtype: set
"""
list_of_ids = set() list_of_ids = set()
current_data = self.already_extracted.get(parent_model.get_name(), {}) current_data = self.already_extracted.get(parent_model.get_name(), {})
table_prefix = current_data.get("prefix", "") table_prefix = current_data.get("prefix", "")
@ -113,6 +193,17 @@ class PrefetchQuery:
def _extract_ids_from_preloaded_models( def _extract_ids_from_preloaded_models(
self, parent_model: Type["Model"], column_name: str self, parent_model: Type["Model"], column_name: str
) -> Set: ) -> Set:
"""
Extracts relation ids from already populated models if they were included
in the original query before.
:param parent_model: model from which related ids should be extracted
:type parent_model: Type["Model"]
:param column_name: name of the relation column which is a key column
:type column_name: str
:return: set of ids of related model that should be extracted
:rtype: set
"""
list_of_ids = set() list_of_ids = set()
for model in self.models.get(parent_model.get_name(), []): for model in self.models.get(parent_model.get_name(), []):
child = getattr(model, column_name) child = getattr(model, column_name)
@ -123,15 +214,27 @@ class PrefetchQuery:
return list_of_ids
def _extract_required_ids(
self, parent_model: Type["Model"], reverse: bool, related: str,
) -> Set:
"""
Delegates extraction of the fields to either get ids from raw sql response
or from already populated models.
:param parent_model: model from which related ids should be extracted
:type parent_model: Type["Model"]
:param reverse: flag if the relation is reverse
:type reverse: bool
:param related: name of the field with relation
:type related: str
:return: set of ids of related model that should be extracted
:rtype: set
"""
use_raw = parent_model.get_name() not in self.models
column_name = parent_model.get_column_name_for_id_extraction(
parent_model=parent_model,
reverse=reverse,
related=related,
use_raw=use_raw,
)
@ -151,8 +254,25 @@ class PrefetchQuery:
reverse: bool, reverse: bool,
related: str, related: str,
) -> List: ) -> List:
"""
Populates where clause with condition to return only models within the
set of extracted ids.
If there are no ids for the relation, an empty list is returned.
:param parent_model: model from which related ids should be extracted
:type parent_model: Type["Model"]
:param target_model: model to which relation leads to
:type target_model: Type["Model"]
:param reverse: flag if the relation is reverse
:type reverse: bool
:param related: name of the field with relation
:type related: str
:return: list of filter clauses limiting the query to the extracted ids
:rtype: List[sqlalchemy.sql.elements.TextClause]
"""
ids = self._extract_required_ids(
parent_model=parent_model, reverse=reverse, related=related
)
if ids:
(
@ -175,7 +295,19 @@ class PrefetchQuery:
def _populate_nested_related( def _populate_nested_related(
self, model: "Model", prefetch_dict: Dict, orders_by: Dict, self, model: "Model", prefetch_dict: Dict, orders_by: Dict,
) -> "Model": ) -> "Model":
"""
Populates all related models children of parent model that are
included in prefetch query.
:param model: ormar model instance
:type model: Model
:param prefetch_dict: dictionary of models to prefetch
:type prefetch_dict: Dict
:param orders_by: dictionary of order bys
:type orders_by: Dict
:return: model with children populated
:rtype: Model
"""
related_to_extract = model.get_filtered_names_to_extract( related_to_extract = model.get_filtered_names_to_extract(
prefetch_dict=prefetch_dict prefetch_dict=prefetch_dict
) )
@ -206,6 +338,24 @@ class PrefetchQuery:
async def _prefetch_related_models( async def _prefetch_related_models(
self, models: Sequence["Model"], rows: List self, models: Sequence["Model"], rows: List
) -> Sequence["Model"]: ) -> Sequence["Model"]:
"""
Main method of the query.
Translates select and prefetch lists into dictionaries to avoid querying the
same related models multiple times.
Keeps the list of already extracted models.
Extracts the related models from the database and later populates all children
on each of the parent models from the list.
:param models: list of parent models from main query
:type models: List[Model]
:param rows: raw response from sql query
:type rows: List[sqlalchemy.engine.result.RowProxy]
:return: list of models with prefetch children populated
:rtype: List[Model]
"""
self.already_extracted = {self.model.get_name(): {"raw": rows}} self.already_extracted = {self.model.get_name(): {"raw": rows}}
select_dict = translate_list_to_dict(self._select_related) select_dict = translate_list_to_dict(self._select_related)
prefetch_dict = translate_list_to_dict(self._prefetch_related) prefetch_dict = translate_list_to_dict(self._prefetch_related)
@ -242,7 +392,32 @@ class PrefetchQuery:
exclude_fields: Union[Set[Any], Dict[Any, Any], None], exclude_fields: Union[Set[Any], Dict[Any, Any], None],
orders_by: Dict, orders_by: Dict,
) -> None: ) -> None:
"""
Constructs queries with required ids and extracts data with fields that should
be included/excluded.
Runs the queries against the database and populates dictionaries with ids and
with the actual extracted children models.
Calls itself recursively to extract deeper nested relations of the related model.
:param related: name of the relation
:type related: str
:param target_model: model to which relation leads to
:type target_model: Type[Model]
:param prefetch_dict: prefetch related list converted into dictionary
:type prefetch_dict: Dict
:param select_dict: select related list converted into dictionary
:type select_dict: Dict
:param fields: fields to include
:type fields: Union[Set[Any], Dict[Any, Any], None]
:param exclude_fields: fields to exclude
:type exclude_fields: Union[Set[Any], Dict[Any, Any], None]
:param orders_by: dictionary of order bys clauses
:type orders_by: Dict
:return: None
:rtype: None
"""
fields = target_model.get_included(fields, related) fields = target_model.get_included(fields, related)
exclude_fields = target_model.get_excluded(exclude_fields, related) exclude_fields = target_model.get_excluded(exclude_fields, related)
target_field = target_model.Meta.model_fields[related] target_field = target_model.Meta.model_fields[related]
@ -320,6 +495,24 @@ class PrefetchQuery:
exclude_fields: Union[Set[Any], Dict[Any, Any], None], exclude_fields: Union[Set[Any], Dict[Any, Any], None],
filter_clauses: List, filter_clauses: List,
) -> Tuple[str, List]: ) -> Tuple[str, List]:
"""
Actually runs the queries against the database and populates the raw response
for given related model.
Returns table prefix as it's later needed to eventually initialize the children
models.
:param target_field: ormar field with relation definition
:type target_field: Type["BaseField"]
:param fields: fields to include
:type fields: Union[Set[Any], Dict[Any, Any], None]
:param exclude_fields: fields to exclude
:type exclude_fields: Union[Set[Any], Dict[Any, Any], None]
:param filter_clauses: list of clauses, actually one clause with ids of relation
:type filter_clauses: List[sqlalchemy.sql.elements.TextClause]
:return: table prefix and raw rows from sql response
:rtype: Tuple[str, List]
"""
target_model = target_field.to target_model = target_field.to
target_name = target_model.get_name() target_name = target_model.get_name()
select_related = [] select_related = []
@ -328,7 +521,7 @@ class PrefetchQuery:
if issubclass(target_field, ManyToManyField):
query_target = target_field.through
select_related = [target_name]
table_prefix = target_field.to.Meta.alias_manager.resolve_relation_alias(
query_target, target_name
)
self.already_extracted.setdefault(target_name, {})["prefix"] = table_prefix
@ -343,6 +536,7 @@ class PrefetchQuery:
fields=fields,
exclude_fields=exclude_fields,
order_bys=None,
limit_raw_sql=False,
)
expr = qry.build_select_expression()
# print(expr.compile(compile_kwargs={"literal_binds": True}))
@ -352,6 +546,17 @@ class PrefetchQuery:
@staticmethod @staticmethod
def _get_select_related_if_apply(related: str, select_dict: Dict) -> Dict: def _get_select_related_if_apply(related: str, select_dict: Dict) -> Dict:
"""
Extracts the nested part of the select_related dictionary for models nested
deeper on the related model and already loaded in the select related query.
:param related: name of the relation
:type related: str
:param select_dict: dictionary of select related models in main query
:type select_dict: Dict
:return: dictionary with nested part of select related
:rtype: Dict
"""
return ( return (
select_dict.get(related, {}) select_dict.get(related, {})
if (select_dict and select_dict is not Ellipsis and related in select_dict) if (select_dict and select_dict is not Ellipsis and related in select_dict)
@ -361,6 +566,16 @@ class PrefetchQuery:
def _update_already_loaded_rows( # noqa: CFQ002 def _update_already_loaded_rows( # noqa: CFQ002
self, target_field: Type["BaseField"], prefetch_dict: Dict, orders_by: Dict, self, target_field: Type["BaseField"], prefetch_dict: Dict, orders_by: Dict,
) -> None: ) -> None:
"""
Updates models that are already loaded, usually children of children.
:param target_field: ormar field with relation definition
:type target_field: Type["BaseField"]
:param prefetch_dict: dictionaries of related models to prefetch
:type prefetch_dict: Dict
:param orders_by: dictionary of order by clauses by model
:type orders_by: Dict
"""
target_model = target_field.to target_model = target_field.to
for instance in self.models.get(target_model.get_name(), []): for instance in self.models.get(target_model.get_name(), []):
self._populate_nested_related( self._populate_nested_related(
@ -378,6 +593,33 @@ class PrefetchQuery:
prefetch_dict: Dict, prefetch_dict: Dict,
orders_by: Dict, orders_by: Dict,
) -> None: ) -> None:
"""
Instantiates children models extracted from given relation.
Populates them with their own nested children if they are included in prefetch
query.
Sets the initialized models and ids of them under corresponding keys in
already_extracted dictionary. Later those instances will be fetched by ids
and set on the parent model after sorting if needed.
:param rows: raw sql response from the prefetch query
:type rows: List[sqlalchemy.engine.result.RowProxy]
:param target_field: field with relation definition from parent model
:type target_field: Type["BaseField"]
:param parent_model: model with relation definition
:type parent_model: Type[Model]
:param table_prefix: prefix of the target table from current relation
:type table_prefix: str
:param fields: fields to include
:type fields: Union[Set[Any], Dict[Any, Any], None]
:param exclude_fields: fields to exclude
:type exclude_fields: Union[Set[Any], Dict[Any, Any], None]
:param prefetch_dict: dictionaries of related models to prefetch
:type prefetch_dict: Dict
:param orders_by: dictionary of order by clauses by model
:type orders_by: Dict
"""
target_model = target_field.to target_model = target_field.to
for row in rows: for row in rows:
field_name = parent_model.get_related_field_name(target_field=target_field) field_name = parent_model.get_related_field_name(target_field=target_field)
@ -388,6 +630,9 @@ class PrefetchQuery:
fields=fields, fields=fields,
exclude_fields=exclude_fields, exclude_fields=exclude_fields,
) )
item["__excluded__"] = target_model.get_names_to_exclude(
fields=fields, exclude_fields=exclude_fields
)
instance = target_model(**item) instance = target_model(**item)
instance = self._populate_nested_related( instance = self._populate_nested_related(
model=instance, prefetch_dict=prefetch_dict, orders_by=orders_by model=instance, prefetch_dict=prefetch_dict, orders_by=orders_by

View File

@ -25,6 +25,7 @@ class Query:
fields: Optional[Union[Dict, Set]], fields: Optional[Union[Dict, Set]],
exclude_fields: Optional[Union[Dict, Set]], exclude_fields: Optional[Union[Dict, Set]],
order_bys: Optional[List], order_bys: Optional[List],
limit_raw_sql: bool,
) -> None: ) -> None:
self.query_offset = offset self.query_offset = offset
self.limit_count = limit_count self.limit_count = limit_count
@ -45,34 +46,85 @@ class Query:
self.sorted_orders: OrderedDict = OrderedDict() self.sorted_orders: OrderedDict = OrderedDict()
self._init_sorted_orders() self._init_sorted_orders()
self.limit_raw_sql = limit_raw_sql
def _init_sorted_orders(self) -> None: def _init_sorted_orders(self) -> None:
"""
Initialize empty order_by dict to be populated later during the query call
"""
if self.order_columns: if self.order_columns:
for clause in self.order_columns: for clause in self.order_columns:
self.sorted_orders[clause] = None self.sorted_orders[clause] = None
@property @property
def prefixed_pk_name(self) -> str: def prefixed_pk_name(self) -> str:
"""
Shortcut for extracting the primary key column alias, prefixed with the table
name, from the main model.
:return: alias of the pk column prefixed with the table name.
:rtype: str
"""
pkname_alias = self.model_cls.get_column_alias(self.model_cls.Meta.pkname) pkname_alias = self.model_cls.get_column_alias(self.model_cls.Meta.pkname)
return f"{self.table.name}.{pkname_alias}" return f"{self.table.name}.{pkname_alias}"
def alias(self, name: str) -> str: def alias(self, name: str) -> str:
"""
Shortcut to extracting column alias from given master model.
:param name: name of column
:type name: str
:return: alias of given column name
:rtype: str
"""
return self.model_cls.get_column_alias(name) return self.model_cls.get_column_alias(name)
def apply_order_bys_for_primary_model(self) -> None: # noqa: CCR001 def apply_order_bys_for_primary_model(self) -> None: # noqa: CCR001
"""
Applies order_by queries on main model when it's used as a subquery.
That way the subquery with limit and offset only on main model has proper
sorting applied and correct models are fetched.
"""
if self.order_columns:
for clause in self.order_columns:
if "__" not in clause:
text_clause = (
text(f"{self.table.name}.{self.alias(clause[1:])} desc")
if clause.startswith("-")
else text(f"{self.table.name}.{self.alias(clause)}")
)
self.sorted_orders[clause] = text_clause
else:
order = text(self.prefixed_pk_name)
self.sorted_orders[self.prefixed_pk_name] = order
def _pagination_query_required(self) -> bool:
"""
Checks if limit or offset is set, the limit_raw_sql flag is not set,
and the query has select_related applied. Otherwise we can limit/offset normally
at the end of the whole query.
:return: result of the check
:rtype: bool
"""
return bool(
(self.limit_count or self.query_offset)
and not self.limit_raw_sql
and self._select_related
)
def build_select_expression(self) -> Tuple[sqlalchemy.sql.select, List[str]]: def build_select_expression(self) -> Tuple[sqlalchemy.sql.select, List[str]]:
"""
Main entry point from outside (after proper initialization).
Extracts columns list to fetch,
construct all required joins for select related,
then applies all conditional and sort clauses.
Returns ready to run query with all joins and clauses.
:return: ready to run query with all joins and clauses.
:rtype: sqlalchemy.sql.selectable.Select
"""
self_related_fields = self.model_cls.own_table_columns( self_related_fields = self.model_cls.own_table_columns(
model=self.model_cls, model=self.model_cls,
fields=self.fields, fields=self.fields,
@ -83,7 +135,10 @@ class Query:
"", self.table, self_related_fields "", self.table, self_related_fields
) )
self.apply_order_bys_for_primary_model() self.apply_order_bys_for_primary_model()
if self._pagination_query_required():
self.select_from = self._build_pagination_subquery()
else:
self.select_from = self.table
self._select_related.sort(key=lambda item: (item, -len(item))) self._select_related.sort(key=lambda item: (item, -len(item)))
@ -120,19 +175,75 @@ class Query:
return expr return expr
def _build_pagination_subquery(self) -> sqlalchemy.sql.select:
"""
Builds a subquery in order to apply limit and offset on the main table only
(otherwise you could get an only partially constructed main model
if the number of children exceeds the applied limit and select_related is used).
Also used to change the behaviour of first() and get() without arguments.
Needed only if limit or offset is set, the limit_raw_sql flag is not set
and the query has select_related applied. Otherwise we can limit/offset normally
at the end of the whole query.
:return: constructed subquery on main table with limit, offset and order applied
:rtype: sqlalchemy.sql.select
"""
expr = sqlalchemy.sql.select(self.model_cls.Meta.table.columns)
expr = LimitQuery(limit_count=self.limit_count).apply(expr)
expr = OffsetQuery(query_offset=self.query_offset).apply(expr)
filters_to_use = [
filter_clause
for filter_clause in self.filter_clauses
if filter_clause.text.startswith(f"{self.table.name}.")
]
excludes_to_use = [
filter_clause
for filter_clause in self.exclude_clauses
if filter_clause.text.startswith(f"{self.table.name}.")
]
sorts_to_use = {k: v for k, v in self.sorted_orders.items() if "__" not in k}
expr = FilterQuery(filter_clauses=filters_to_use).apply(expr)
expr = FilterQuery(filter_clauses=excludes_to_use, exclude=True).apply(expr)
expr = OrderQuery(sorted_orders=sorts_to_use).apply(expr)
expr = expr.alias(f"{self.table}")
self.filter_clauses = list(set(self.filter_clauses) - set(filters_to_use))
self.exclude_clauses = list(set(self.exclude_clauses) - set(excludes_to_use))
return expr
def _apply_expression_modifiers( def _apply_expression_modifiers(
self, expr: sqlalchemy.sql.select self, expr: sqlalchemy.sql.select
) -> sqlalchemy.sql.select: ) -> sqlalchemy.sql.select:
"""
Receives the select query (might be join) and applies:
* Filter clauses
* Exclude filter clauses
* Limit clauses
* Offset clauses
* Order by clauses
Returns complete ready to run query.
:param expr: select expression before clauses
:type expr: sqlalchemy.sql.selectable.Select
:return: expression with all present clauses applied
:rtype: sqlalchemy.sql.selectable.Select
"""
expr = FilterQuery(filter_clauses=self.filter_clauses).apply(expr)
expr = FilterQuery(filter_clauses=self.exclude_clauses, exclude=True).apply(
expr
)
if not self._pagination_query_required():
expr = LimitQuery(limit_count=self.limit_count).apply(expr)
expr = OffsetQuery(query_offset=self.query_offset).apply(expr)
expr = OrderQuery(sorted_orders=self.sorted_orders).apply(expr)
return expr
def _reset_query_parameters(self) -> None: def _reset_query_parameters(self) -> None:
"""
Although the query should be created anew before each call, we reset the key
params anyway.
"""
self.select_from = [] self.select_from = []
self.columns = [] self.columns = []
self.used_aliases = [] self.used_aliases = []

View File

@ -20,6 +20,10 @@ if TYPE_CHECKING: # pragma no cover
class QuerySet: class QuerySet:
"""
Main class to perform database queries, exposed on each model as objects attribute.
"""
def __init__( # noqa CFQ002 def __init__( # noqa CFQ002
self, self,
model_cls: Type["Model"] = None, model_cls: Type["Model"] = None,
@ -32,6 +36,7 @@ class QuerySet:
exclude_columns: Dict = None, exclude_columns: Dict = None,
order_bys: List = None, order_bys: List = None,
prefetch_related: List = None, prefetch_related: List = None,
limit_raw_sql: bool = False,
) -> None: ) -> None:
self.model_cls = model_cls self.model_cls = model_cls
self.filter_clauses = [] if filter_clauses is None else filter_clauses self.filter_clauses = [] if filter_clauses is None else filter_clauses
@ -43,6 +48,7 @@ class QuerySet:
self._columns = columns or {} self._columns = columns or {}
self._exclude_columns = exclude_columns or {} self._exclude_columns = exclude_columns or {}
self.order_bys = order_bys or [] self.order_bys = order_bys or []
self.limit_sql_raw = limit_raw_sql
def __get__( def __get__(
self, self,
@ -55,12 +61,24 @@ class QuerySet:
@property @property
def model_meta(self) -> "ModelMeta": def model_meta(self) -> "ModelMeta":
"""
Shortcut to model class Meta set on QuerySet model.
:return: Meta class of the model
:rtype: model Meta class
"""
if not self.model_cls: # pragma nocover if not self.model_cls: # pragma nocover
raise ValueError("Model class of QuerySet is not initialized") raise ValueError("Model class of QuerySet is not initialized")
return self.model_cls.Meta return self.model_cls.Meta
@property @property
def model(self) -> Type["Model"]: def model(self) -> Type["Model"]:
"""
Shortcut to model class set on QuerySet.
:return: model class
:rtype: Type[Model]
"""
if not self.model_cls: # pragma nocover if not self.model_cls: # pragma nocover
raise ValueError("Model class of QuerySet is not initialized") raise ValueError("Model class of QuerySet is not initialized")
return self.model_cls return self.model_cls
@ -68,6 +86,16 @@ class QuerySet:
async def _prefetch_related_models( async def _prefetch_related_models(
self, models: Sequence[Optional["Model"]], rows: List self, models: Sequence[Optional["Model"]], rows: List
) -> Sequence[Optional["Model"]]: ) -> Sequence[Optional["Model"]]:
"""
Performs prefetch query for selected models names.
:param models: list of already parsed main Models from main query
:type models: List[Model]
:param rows: database rows from main query
:type rows: List[sqlalchemy.engine.result.RowProxy]
:return: list of models with prefetch models populated
:rtype: List[Model]
"""
query = PrefetchQuery( query = PrefetchQuery(
model_cls=self.model, model_cls=self.model,
fields=self._columns, fields=self._columns,
@ -79,6 +107,14 @@ class QuerySet:
return await query.prefetch_related(models=models, rows=rows) # type: ignore return await query.prefetch_related(models=models, rows=rows) # type: ignore
def _process_query_result_rows(self, rows: List) -> Sequence[Optional["Model"]]: def _process_query_result_rows(self, rows: List) -> Sequence[Optional["Model"]]:
"""
Process database rows and initialize ormar Model from each of the rows.
:param rows: list of database rows from query result
:type rows: List[sqlalchemy.engine.result.RowProxy]
:return: list of models
:rtype: List[Model]
"""
result_rows = [ result_rows = [
self.model.from_row( self.model.from_row(
row=row, row=row,
@ -92,24 +128,14 @@ class QuerySet:
return self.model.merge_instances_list(result_rows) # type: ignore return self.model.merge_instances_list(result_rows) # type: ignore
return result_rows return result_rows
def _prepare_model_to_save(self, new_kwargs: dict) -> dict:
new_kwargs = self._remove_pk_from_kwargs(new_kwargs)
new_kwargs = self.model.substitute_models_with_pks(new_kwargs)
new_kwargs = self.model.populate_default_values(new_kwargs)
new_kwargs = self.model.translate_columns_to_aliases(new_kwargs)
return new_kwargs
def _remove_pk_from_kwargs(self, new_kwargs: dict) -> dict:
pkname = self.model_meta.pkname
pk = self.model_meta.model_fields[pkname]
if new_kwargs.get(pkname, ormar.Undefined) is None and (
pk.nullable or pk.autoincrement
):
del new_kwargs[pkname]
return new_kwargs
@staticmethod @staticmethod
def check_single_result_rows_count(rows: Sequence[Optional["Model"]]) -> None: def check_single_result_rows_count(rows: Sequence[Optional["Model"]]) -> None:
"""
Verifies if the result has one and only one row.
:param rows: one element list of Models
:type rows: List[Model]
"""
if not rows or rows[0] is None: if not rows or rows[0] is None:
raise NoMatch() raise NoMatch()
if len(rows) > 1: if len(rows) > 1:
@ -117,29 +143,84 @@ class QuerySet:
@property @property
def database(self) -> databases.Database: def database(self) -> databases.Database:
"""
Shortcut to models database from Meta class.
:return: database
:rtype: databases.Database
"""
return self.model_meta.database return self.model_meta.database
@property @property
def table(self) -> sqlalchemy.Table: def table(self) -> sqlalchemy.Table:
"""
Shortcut to models table from Meta class.
:return: database table
:rtype: sqlalchemy.Table
"""
return self.model_meta.table return self.model_meta.table
def build_select_expression(
self, limit: int = None, offset: int = None, order_bys: List = None,
) -> sqlalchemy.sql.select:
"""
Constructs the actual database query used in the QuerySet.
If any of the params is not passed, the QuerySet's own value is used.
:param limit: number to limit the query
:type limit: int
:param offset: number to offset by
:type offset: int
:param order_bys: list of order by field names
:type order_bys: List
:return: built sqlalchemy select expression
:rtype: sqlalchemy.sql.selectable.Select
"""
qry = Query(
model_cls=self.model,
select_related=self._select_related,
filter_clauses=self.filter_clauses,
exclude_clauses=self.exclude_clauses,
offset=offset or self.query_offset,
limit_count=limit or self.limit_count,
fields=self._columns,
exclude_fields=self._exclude_columns,
order_bys=order_bys or self.order_bys,
limit_raw_sql=self.limit_sql_raw,
)
exp = qry.build_select_expression()
# print("\n", exp.compile(compile_kwargs={"literal_binds": True}))
return exp
def filter(self, _exclude: bool = False, **kwargs: Any) -> "QuerySet": # noqa: A003 def filter(self, _exclude: bool = False, **kwargs: Any) -> "QuerySet": # noqa: A003
"""
Allows you to filter by any `Model` attribute/field
as well as to fetch instances, with a filter across an FK relationship.
You can use special filter suffix to change the filter operands:
* exact - like `album__name__exact='Malibu'` (exact match)
* iexact - like `album__name__iexact='malibu'` (exact match case insensitive)
* contains - like `album__name__contains='Mal'` (sql like)
* icontains - like `album__name__icontains='mal'` (sql like case insensitive)
* in - like `album__name__in=['Malibu', 'Barclay']` (sql in)
* gt - like `position__gt=3` (sql >)
* gte - like `position__gte=3` (sql >=)
* lt - like `position__lt=3` (sql <)
* lte - like `position__lte=3` (sql <=)
* startswith - like `album__name__startswith='Mal'` (exact start match)
* istartswith - like `album__name__istartswith='mal'` (case insensitive)
* endswith - like `album__name__endswith='ibu'` (exact end match)
* iendswith - like `album__name__iendswith='IBU'` (case insensitive)
:param _exclude: flag if it should be exclude or filter
:type _exclude: bool
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: filtered QuerySet
:rtype: QuerySet
"""
qryclause = QueryClause(
model_cls=self.model,
select_related=self._select_related,
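A minimal usage sketch of these filter modifiers. The `Album`/`Track` models, their extra columns (`title`, `position`, `play_count`) and the sqlite URL below are illustrative assumptions, not part of this changeset:

```python
from typing import Optional

import databases
import sqlalchemy
import ormar

# illustrative setup only - any URL supported by `databases` works
database = databases.Database("sqlite:///example.db")
metadata = sqlalchemy.MetaData()


class Album(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)


class Track(ormar.Model):
    class Meta:
        database = database
        metadata = metadata

    id: int = ormar.Integer(primary_key=True)
    album: Optional[Album] = ormar.ForeignKey(Album)
    title: str = ormar.String(max_length=100)
    position: int = ormar.Integer()
    play_count: Optional[int] = ormar.Integer(nullable=True)


async def filter_examples() -> None:
    # exact match across the FK relationship, as in the docstring above
    malibu_tracks = await Track.objects.filter(album__name__exact="Malibu").all()

    # suffixes combine freely: case-insensitive "like" plus a comparison
    tracks = await Track.objects.filter(title__icontains="the", position__gte=3).all()

    # exclude() negates the same condition set
    early_tracks = await Track.objects.exclude(position__gt=3).all()
```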
@ -164,12 +245,47 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=self.order_bys, order_bys=self.order_bys,
prefetch_related=self._prefetch_related, prefetch_related=self._prefetch_related,
limit_raw_sql=self.limit_sql_raw,
) )
def exclude(self, **kwargs: Any) -> "QuerySet": # noqa: A003 def exclude(self, **kwargs: Any) -> "QuerySet": # noqa: A003
"""
Works exactly the same as filter and all modifiers (suffixes) are the same,
but returns a *not* condition.
So if you use `filter(name='John')` which is `where name = 'John'` in SQL,
the `exclude(name='John')` is equivalent to `where name <> 'John'`.
Note that all conditions are joined so if you pass multiple values it
becomes a union of conditions.
`exclude(name='John', age__gte=35)` will become
`where not (name='John' and age >= 35)`
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: filtered QuerySet
:rtype: QuerySet
"""
return self.filter(_exclude=True, **kwargs) return self.filter(_exclude=True, **kwargs)
def select_related(self, related: Union[List, str]) -> "QuerySet": def select_related(self, related: Union[List, str]) -> "QuerySet":
"""
Allows you to load related models in the same query.
**With `select_related` always only one query is run against the database**,
meaning that one (sometimes complicated) join is generated and later nested
models are processed in python.
To fetch related model use `ForeignKey` names.
To chain related `Models` relation use double underscores between names.
:param related: list of relation field names, can be linked by '__' to nest
:type related: Union[List, str]
:return: QuerySet
:rtype: QuerySet
"""
if not isinstance(related, list):
related = [related]
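For example, with the hypothetical `Album`/`Track` models sketched after the `filter()` docstring above (the default reverse relation name `tracks` is an assumption of that sketch), a single joined query populates the nested models:

```python
async def select_related_example() -> None:
    # one SQL query with a join; nested rows are merged back in python
    album = await Album.objects.select_related("tracks").get(name="Malibu")
    print(len(album.tracks))

    # deeper relations would be chained with '__', e.g. "album__some_relation"
    tracks = await Track.objects.select_related("album").all()
```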
@ -185,9 +301,27 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=self.order_bys, order_bys=self.order_bys,
prefetch_related=self._prefetch_related, prefetch_related=self._prefetch_related,
limit_raw_sql=self.limit_sql_raw,
) )
def prefetch_related(self, related: Union[List, str]) -> "QuerySet": def prefetch_related(self, related: Union[List, str]) -> "QuerySet":
"""
Allows you to prefetch related models during a query - but in contrast to
`select_related` each subsequent model is fetched in a separate database query.
**With `prefetch_related` always one query per Model is run against the
database**, meaning that you will have multiple queries executed one
after another.
To fetch related model use `ForeignKey` names.
To chain related `Models` relation use double underscores between names.
:param related: list of relation field names, can be linked by '__' to nest
:type related: Union[List, str]
:return: QuerySet
:rtype: QuerySet
"""
if not isinstance(related, list):
related = [related]
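A short sketch using the same hypothetical models as in the earlier example; note the separate query per relation, in contrast to the single join of `select_related`:

```python
async def prefetch_related_example() -> None:
    # two queries: one for albums, one fetching all related tracks by ids
    albums = await Album.objects.prefetch_related("tracks").all()
    for album in albums:
        print(album.name, len(album.tracks))
```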
@ -203,32 +337,52 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=self.order_bys, order_bys=self.order_bys,
prefetch_related=related,
limit_raw_sql=self.limit_sql_raw,
)
def exclude_fields(self, columns: Union[List, str, Set, Dict]) -> "QuerySet":
if isinstance(columns, str):
columns = [columns]
current_excluded = self._exclude_columns
if not isinstance(columns, dict):
current_excluded = update_dict_from_list(current_excluded, columns)
else:
current_excluded = update(current_excluded, columns)
return self.__class__(
model_cls=self.model,
filter_clauses=self.filter_clauses,
exclude_clauses=self.exclude_clauses,
select_related=self._select_related,
limit_count=self.limit_count,
offset=self.query_offset,
columns=self._columns,
exclude_columns=current_excluded,
order_bys=self.order_bys,
prefetch_related=self._prefetch_related,
)
def fields(self, columns: Union[List, str, Set, Dict]) -> "QuerySet": def fields(self, columns: Union[List, str, Set, Dict]) -> "QuerySet":
"""
With `fields()` you can select subset of model columns to limit the data load.
Note that `fields()` and `exclude_fields()` work both for main models
(on normal queries like `get`, `all` etc.)
as well as `select_related` and `prefetch_related`
models (with nested notation).
You can select specified fields by passing a `str, List[str], Set[str] or
dict` with nested definition.
To include related models use notation
`{related_name}__{column}[__{optional_next} etc.]`.
`fields()` can be called several times, building up the columns to select.
If you include related models in a `select_related()` call but do not specify
columns for those models in fields, all fields of
those nested models are implied.
Mandatory fields cannot be excluded as it will raise `ValidationError`,
to exclude a field it has to be nullable.
Pk column cannot be excluded - it's always auto added even if
not explicitly included.
You can also pass fields to include as dictionary or set.
To mark a field as included in a dictionary use its name as a key
and ellipsis as value.
To traverse nested models use nested dictionaries.
To include fields at the last level a set can be used instead of a nested dictionary.
To include a whole nested model specify its related field name and ellipsis.
:param columns: columns to include
:type columns: Union[List, str, Set, Dict]
:return: QuerySet
:rtype: QuerySet
"""
if isinstance(columns, str):
columns = [columns]
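A sketch of the documented call patterns, again assuming the hypothetical `Album`/`Track` models from the earlier example:

```python
async def fields_example() -> None:
    # plain list of column names (the pk is always auto added)
    albums = await Album.objects.fields(["name"]).all()

    # nested definition: dicts traverse relations, a set selects leaf columns,
    # Ellipsis marks a field (or a whole nested model) as included
    tracks = await (
        Track.objects.select_related("album")
        .fields({"title": ..., "position": ..., "album": {"name"}})
        .all()
    )
```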
@ -249,9 +403,91 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=self.order_bys, order_bys=self.order_bys,
prefetch_related=self._prefetch_related, prefetch_related=self._prefetch_related,
limit_raw_sql=self.limit_sql_raw,
)
def exclude_fields(self, columns: Union[List, str, Set, Dict]) -> "QuerySet":
"""
With `exclude_fields()` you can select subset of model columns that will
be excluded to limit the data load.
It's the opposite of `fields()` method so check documentation above
to see what options are available.
Especially check above how you can pass also nested dictionaries
and sets as a mask to exclude fields from whole hierarchy.
Note that `fields()` and `exclude_fields()` work both for main models
(on normal queries like `get`, `all` etc.)
as well as `select_related` and `prefetch_related` models
(with nested notation).
Mandatory fields cannot be excluded as it will raise `ValidationError`,
to exclude a field it has to be nullable.
Pk column cannot be excluded - it's always auto added even
if explicitly excluded.
:param columns: columns to exclude
:type columns: Union[List, str, Set, Dict]
:return: QuerySet
:rtype: QuerySet
"""
if isinstance(columns, str):
columns = [columns]
current_excluded = self._exclude_columns
if not isinstance(columns, dict):
current_excluded = update_dict_from_list(current_excluded, columns)
else:
current_excluded = update(current_excluded, columns)
return self.__class__(
model_cls=self.model,
filter_clauses=self.filter_clauses,
exclude_clauses=self.exclude_clauses,
select_related=self._select_related,
limit_count=self.limit_count,
offset=self.query_offset,
columns=self._columns,
exclude_columns=current_excluded,
order_bys=self.order_bys,
prefetch_related=self._prefetch_related,
limit_raw_sql=self.limit_sql_raw,
)
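A short sketch under the same assumptions as the earlier examples; only the nullable `play_count` column is excluded, since mandatory fields cannot be dropped:

```python
async def exclude_fields_example() -> None:
    # the excluded column is simply not loaded from the database
    tracks = await Track.objects.exclude_fields("play_count").all()

    # the same nested dict/set masks accepted by fields() work here as well,
    # e.g. exclude_fields({"album": {"some_nullable_column"}}) for related models
```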
def order_by(self, columns: Union[List, str]) -> "QuerySet":
"""
With `order_by()` you can order the results from database based on your
choice of fields.
You can provide a string with field name or list of strings with fields names.
Ordering in sql will be applied in order of names you provide in order_by.
By default, if you do not provide ordering, `ormar` explicitly orders by
all primary keys.
If you are sorting by nested models and that causes the result rows to be
unsorted by the main model, `ormar` will combine those children rows into
one main model.
The main model will never be duplicated in the result.
To order by a main model field just provide the field name.
To sort on nested models separate field names with dunder '__'.
You can sort this way across all relation types -> `ForeignKey`,
reverse virtual FK and `ManyToMany` fields.
To sort in descending order provide a hyphen in front of the field name.
:param columns: columns by which models should be sorted
:type columns: Union[List, str]
:return: QuerySet
:rtype: QuerySet
"""
if not isinstance(columns, list):
columns = [columns]
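For instance, still assuming the hypothetical models sketched earlier:

```python
async def order_by_example() -> None:
    # ascending by title, then descending by position (note the leading hyphen)
    tracks = await Track.objects.order_by(["title", "-position"]).all()

    # sorting across a relation uses the same dunder notation as filters
    tracks = await Track.objects.select_related("album").order_by("album__name").all()
```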
@ -267,19 +503,47 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=order_bys, order_bys=order_bys,
prefetch_related=self._prefetch_related, prefetch_related=self._prefetch_related,
limit_raw_sql=self.limit_sql_raw,
) )
async def exists(self) -> bool: async def exists(self) -> bool:
"""
Returns a bool value to confirm if there are rows matching the given criteria
(applied with `filter` and `exclude` if set).
:return: result of the check
:rtype: bool
"""
expr = self.build_select_expression()
expr = sqlalchemy.exists(expr).select()
return await self.database.fetch_val(expr)
async def count(self) -> int: async def count(self) -> int:
"""
Returns number of rows matching the given criteria
(applied with `filter` and `exclude` if set before).
:return: number of rows
:rtype: int
"""
expr = self.build_select_expression().alias("subquery_for_count")
expr = sqlalchemy.func.count().select().select_from(expr)
return await self.database.fetch_val(expr)
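Both helpers run the currently built query as a subquery; a minimal sketch with the models assumed earlier:

```python
async def exists_and_count_example() -> None:
    has_malibu = await Album.objects.filter(name="Malibu").exists()  # -> bool
    total_tracks = await Track.objects.count()  # -> int
    print(has_malibu, total_tracks)
```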
async def update(self, each: bool = False, **kwargs: Any) -> int:
"""
Updates the model table after applying the filters from kwargs.
You have to either pass a filter to narrow down a query or explicitly pass
each=True flag to affect whole table.
:param each: flag if whole table should be affected if no filter is passed
:type each: bool
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: number of updated rows
:rtype: int
"""
self_fields = self.model.extract_db_own_fields().union(
self.model.extract_related_names()
)
@ -296,6 +560,19 @@ class QuerySet:
return await self.database.execute(expr)
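A sketch of both documented forms (a filtered update and the explicit `each=True` flag), using the nullable `play_count` column and track title assumed in the earlier example:

```python
async def update_example() -> None:
    # narrowed by a filter - only matching rows are updated
    updated = await Track.objects.filter(title="The Bird").update(play_count=1)

    # without a filter the whole table must be confirmed explicitly
    updated_all = await Track.objects.update(each=True, play_count=0)
    print(updated, updated_all)
```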
async def delete(self, each: bool = False, **kwargs: Any) -> int:
"""
Deletes from the model table after applying the filters from kwargs.
You have to either pass a filter to narrow down a query or explicitly pass
each=True flag to affect whole table.
:param each: flag if whole table should be affected if no filter is passed
:type each: bool
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: number of deleted rows
:rtype: int
"""
if kwargs:
return await self.filter(**kwargs).delete()
if not each and not self.filter_clauses:
@ -308,7 +585,21 @@ class QuerySet:
)
return await self.database.execute(expr)
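The same pattern applies to deletes; kwargs act as a shortcut for `filter(**kwargs).delete()` (models and titles as assumed earlier):

```python
async def delete_example() -> None:
    # equivalent to Track.objects.filter(title="Old Demo").delete()
    deleted = await Track.objects.delete(title="Old Demo")

    # wiping the whole table requires the explicit flag
    await Track.objects.delete(each=True)
    print(deleted)
```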
def limit(self, limit_count: int, limit_raw_sql: bool = None) -> "QuerySet":
"""
You can limit the results to desired number of parent models.
To limit the actual number of database query rows instead of number of main
models use the `limit_raw_sql` parameter flag, and set it to `True`.
:param limit_raw_sql: flag if raw sql should be limited
:type limit_raw_sql: bool
:param limit_count: number of models to limit
:type limit_count: int
:return: QuerySet
:rtype: QuerySet
"""
limit_raw_sql = self.limit_sql_raw if limit_raw_sql is None else limit_raw_sql
return self.__class__( return self.__class__(
model_cls=self.model, model_cls=self.model,
filter_clauses=self.filter_clauses, filter_clauses=self.filter_clauses,
@ -320,9 +611,24 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=self.order_bys, order_bys=self.order_bys,
prefetch_related=self._prefetch_related, prefetch_related=self._prefetch_related,
limit_raw_sql=limit_raw_sql,
) )
def offset(self, offset: int, limit_raw_sql: bool = None) -> "QuerySet":
"""
You can also offset the results by desired number of main models.
To offset the actual number of database query rows instead of number of main
models use the `limit_raw_sql` parameter flag, and set it to `True`.
:param limit_raw_sql: flag if raw sql should be offset
:type limit_raw_sql: bool
:param offset: number of models to offset
:type offset: int
:return: QuerySet
:rtype: QuerySet
"""
limit_raw_sql = self.limit_sql_raw if limit_raw_sql is None else limit_raw_sql
return self.__class__( return self.__class__(
model_cls=self.model, model_cls=self.model,
filter_clauses=self.filter_clauses, filter_clauses=self.filter_clauses,
@ -334,23 +640,57 @@ class QuerySet:
exclude_columns=self._exclude_columns, exclude_columns=self._exclude_columns,
order_bys=self.order_bys, order_bys=self.order_bys,
prefetch_related=self._prefetch_related, prefetch_related=self._prefetch_related,
limit_raw_sql=limit_raw_sql,
)
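A sketch of the difference between limiting main models (the default) and limiting raw sql rows, assuming the earlier `Album`/`Track` models:

```python
async def limit_offset_example() -> None:
    # second page of 10 *albums*, even though the join returns more raw rows
    albums = await Album.objects.select_related("tracks").offset(10).limit(10).all()

    # limit the raw sql rows instead - children rows count towards the limit
    raw_limited = await (
        Album.objects.select_related("tracks").limit(10, limit_raw_sql=True).all()
    )
    print(len(albums), len(raw_limited))
```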
async def first(self, **kwargs: Any) -> "Model":
"""
Gets the first row from the db ordered by primary key column ascending.
:raises NoMatch: if no rows are returned
:raises MultipleMatches: if more than 1 row is returned.
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: returned model
:rtype: Model
"""
if kwargs:
return await self.filter(**kwargs).first()
expr = self.build_select_expression(
limit=1, order_bys=[f"{self.model.Meta.pkname}"] + self.order_bys
)
rows = await self.database.fetch_all(expr)
processed_rows = self._process_query_result_rows(rows)
if self._prefetch_related and processed_rows:
processed_rows = await self._prefetch_related_models(processed_rows, rows)
self.check_single_result_rows_count(processed_rows)
return processed_rows[0] # type: ignore
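A short sketch; note that `get()` without arguments (documented below) behaves similarly but returns the last row by pk instead:

```python
async def first_example() -> None:
    # first album by ascending primary key; raises NoMatch on an empty table
    first_album = await Album.objects.first()

    # kwargs are a shortcut for filter(**kwargs).first()
    first_malibu = await Album.objects.first(name="Malibu")
    print(first_album.pk, first_malibu.pk)
```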
async def get(self, **kwargs: Any) -> "Model":
"""
Gets the first row from the db meeting the criteria set by kwargs.
If no criteria are set it will return the last row in the db sorted by pk.
Passing criteria is actually calling the filter(**kwargs) method described below.
:raises NoMatch: if no rows are returned
:raises MultipleMatches: if more than 1 row is returned.
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: returned model
:rtype: Model
"""
if kwargs:
return await self.filter(**kwargs).get()
if not self.filter_clauses:
expr = self.build_select_expression(
limit=1, order_bys=[f"-{self.model.Meta.pkname}"] + self.order_bys
)
else:
expr = self.build_select_expression()
rows = await self.database.fetch_all(expr)
processed_rows = self._process_query_result_rows(rows)
@ -360,12 +700,32 @@ class QuerySet:
return processed_rows[0]  # type: ignore
async def get_or_create(self, **kwargs: Any) -> "Model": async def get_or_create(self, **kwargs: Any) -> "Model":
"""
Combination of create and get methods.
Tries to get a row meeting the criteria from kwargs
and if `NoMatch` exception is raised
it creates a new one with given kwargs.
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: returned or created Model
:rtype: Model
"""
try:
return await self.get(**kwargs)
except NoMatch:
return await self.create(**kwargs)
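A sketch with the hypothetical `Album` model from the earlier example:

```python
async def get_or_create_example() -> None:
    # inserts the row on the first call, returns the existing one afterwards
    album = await Album.objects.get_or_create(name="Malibu")
    same_album = await Album.objects.get_or_create(name="Malibu")
    assert album.pk == same_album.pk
```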
async def update_or_create(self, **kwargs: Any) -> "Model":
"""
Updates the model, or in case there is no match in database creates a new one.
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: updated or created model
:rtype: Model
"""
pk_name = self.model_meta.pkname
if "pk" in kwargs:
kwargs[pk_name] = kwargs.pop("pk")
@ -375,6 +735,18 @@ class QuerySet:
return await model.update(**kwargs)
async def all(self, **kwargs: Any) -> Sequence[Optional["Model"]]: # noqa: A003 async def all(self, **kwargs: Any) -> Sequence[Optional["Model"]]: # noqa: A003
"""
Returns all rows from a database for given model for set filter options.
Passing kwargs is a shortcut and is equivalent to calling `filter(**kwargs).all()`.
If there are no rows meeting the criteria an empty list is returned.
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: list of returned models
:rtype: List[Model]
"""
if kwargs: if kwargs:
return await self.filter(**kwargs).all() return await self.filter(**kwargs).all()
@ -387,9 +759,19 @@ class QuerySet:
return result_rows return result_rows
async def create(self, **kwargs: Any) -> "Model": async def create(self, **kwargs: Any) -> "Model":
"""
Creates the model instance, saves it in the database and returns the updated model
(with pk populated if not passed and autoincrement is set).
The allowed kwargs are `Model` fields names and proper value types.
:param kwargs: fields names and proper value types
:type kwargs: Any
:return: created model
:rtype: Model
"""
new_kwargs = dict(**kwargs)
new_kwargs = self.model.prepare_model_to_save(new_kwargs)
expr = self.table.insert()
expr = expr.values(**new_kwargs)
@ -420,10 +802,22 @@ class QuerySet:
return instance return instance
async def bulk_create(self, objects: List["Model"]) -> None: async def bulk_create(self, objects: List["Model"]) -> None:
"""
Performs a bulk create in one database session to speed up the process.
Allows you to create multiple objects at once.
A valid list of `Model` objects needs to be passed.
Bulk operations do not send signals.
:param objects: list of ormar models already initialized and ready to save.
:type objects: List[Model]
"""
ready_objects = []
for objt in objects:
new_kwargs = objt.dict()
new_kwargs = objt.prepare_model_to_save(new_kwargs)
ready_objects.append(new_kwargs)
expr = self.table.insert()
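A minimal sketch, assuming the `Album`/`Track` models from the earlier example:

```python
async def bulk_create_example() -> None:
    album = await Album.objects.create(name="Fantasies")
    # instantiate the models first, then insert them in one database call
    tracks = [Track(album=album, title=f"Track {i}", position=i) for i in range(1, 4)]
    await Track.objects.bulk_create(tracks)
```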
@ -435,6 +829,23 @@ class QuerySet:
async def bulk_update( # noqa: CCR001
self, objects: List["Model"], columns: List[str] = None
) -> None:
"""
Performs bulk update in one database session to speed up the process.
Allows you to update multiple instances at once.
All `Models` passed need to have primary key column populated.
You can also select which fields to update by passing `columns` list
as a list of string names.
Bulk operations do not send signals.
:param objects: list of ormar models
:type objects: List[Model]
:param columns: list of columns to update
:type columns: List[str]
"""
ready_objects = []
pk_name = self.model_meta.pkname
if not columns:
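A sketch under the same assumptions as the earlier examples; every instance must already have a primary key, and `columns` narrows the updated fields:

```python
async def bulk_update_example() -> None:
    tracks = await Track.objects.all()
    for track in tracks:
        track.play_count = 0
    # one session, only the listed column is written
    await Track.objects.bulk_update(tracks, columns=["play_count"])
```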

View File

@ -18,6 +18,22 @@ if TYPE_CHECKING: # pragma no cover
def check_node_not_dict_or_not_last_node( def check_node_not_dict_or_not_last_node(
part: str, parts: List, current_level: Any part: str, parts: List, current_level: Any
) -> bool: ) -> bool:
"""
Checks if given name is not present in the current level of the structure.
Checks if given name is not the last name in the split list of parts.
Checks if the given name in current level is not a dictionary.
All those checks verify if there is a need for deeper traversal.
:param part: name of the currently processed part of the path
:type part: str
:param parts: list of all parts of the path split by '__'
:type parts: List[str]
:param current_level: current level of the traversed structure
:type current_level: Any
:return: result of the check
:rtype: bool
"""
return (part not in current_level and part != parts[-1]) or (
part in current_level and not isinstance(current_level[part], dict)
)
@ -26,6 +42,21 @@ def check_node_not_dict_or_not_last_node(
def translate_list_to_dict( # noqa: CCR001 def translate_list_to_dict( # noqa: CCR001
list_to_trans: Union[List, Set], is_order: bool = False list_to_trans: Union[List, Set], is_order: bool = False
) -> Dict: ) -> Dict:
"""
Splits the list of strings by '__' and converts them to a dictionary with nested
models grouped by parent model. That way each model appears only once in the whole
dictionary and children are grouped under the parent name.
The default required key is Ellipsis, like in pydantic.
:param list_to_trans: input list
:type list_to_trans: set
:param is_order: flag whether the list comes from order_by clauses, which require a
special default value with the sort order.
:type is_order: bool
:return: input list converted to a dictionary
:rtype: Dict
"""
new_dict: Dict = dict() new_dict: Dict = dict()
for path in list_to_trans: for path in list_to_trans:
current_level = new_dict current_level = new_dict
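The transformation described above (including the check that `check_node_not_dict_or_not_last_node` performs) can be illustrated with a standalone re-implementation. This is a sketch of the idea, not the ormar source, and it ignores the `is_order` variant; the function name is ours.

```python
from typing import Any, Dict, List, Set, Union


def translate_list_to_dict_sketch(list_to_trans: Union[List, Set]) -> Dict:
    """Split each name on '__' and nest the parts, marking leaves with Ellipsis."""
    new_dict: Dict[str, Any] = {}
    for path in list_to_trans:
        current_level = new_dict
        parts = path.split("__")
        for part in parts[:-1]:
            # descend only when the node is missing or not yet a dict
            if not isinstance(current_level.get(part), dict):
                current_level[part] = {}
            current_level = current_level[part]
        current_level.setdefault(parts[-1], Ellipsis)
    return new_dict


assert translate_list_to_dict_sketch(["tracks__album__name", "tracks__title"]) == {
    "tracks": {"album": {"name": ...}, "title": ...}
}
```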
@ -50,6 +81,15 @@ def translate_list_to_dict( # noqa: CCR001
def convert_set_to_required_dict(set_to_convert: set) -> Dict: def convert_set_to_required_dict(set_to_convert: set) -> Dict:
"""
Converts a set to a dictionary of required keys.
A required key is marked with Ellipsis.
:param set_to_convert: set to convert to dict
:type set_to_convert: set
:return: set converted to a dict with Ellipsis values
:rtype: Dict
"""
new_dict = dict() new_dict = dict()
for key in set_to_convert: for key in set_to_convert:
new_dict[key] = Ellipsis new_dict[key] = Ellipsis
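The set conversion is small enough to restate as a one-line sketch (the function name is ours):

```python
def convert_set_to_required_dict_sketch(set_to_convert: set) -> dict:
    # every key becomes "required", mirroring pydantic's Ellipsis marker
    return {key: Ellipsis for key in set_to_convert}


assert convert_set_to_required_dict_sketch({"name", "title"}) == {"name": ..., "title": ...}
```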
@ -57,6 +97,19 @@ def convert_set_to_required_dict(set_to_convert: set) -> Dict:
def update(current_dict: Any, updating_dict: Any) -> Dict: # noqa: CCR001 def update(current_dict: Any, updating_dict: Any) -> Dict: # noqa: CCR001
"""
Updates one dict with another, with regard for nested keys.
That way nested sets are unioned, nested dicts are updated and
only plain values are overwritten.
:param current_dict: dict to update
:type current_dict: Dict[str, ellipsis]
:param updating_dict: dict with values to update
:type updating_dict: Dict
:return: combination of both dicts
:rtype: Dict
"""
if current_dict is Ellipsis: if current_dict is Ellipsis:
current_dict = dict() current_dict = dict()
for key, value in updating_dict.items(): for key, value in updating_dict.items():
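A self-contained sketch of the merge behavior described above; the name `deep_update_sketch` is ours, not the library's.

```python
from typing import Any, Dict


def deep_update_sketch(current_dict: Any, updating_dict: Dict) -> Dict:
    """Merge updating_dict into current_dict, unioning sets and recursing into dicts."""
    if current_dict is Ellipsis:
        current_dict = {}
    for key, value in updating_dict.items():
        old = current_dict.get(key)
        if isinstance(old, set) and isinstance(value, set):
            current_dict[key] = old | value
        elif isinstance(old, dict) and isinstance(value, dict):
            deep_update_sketch(old, value)
        else:
            current_dict[key] = value
    return current_dict


merged = deep_update_sketch({"tracks": {"album": ...}}, {"tracks": {"title": ...}})
assert merged == {"tracks": {"album": ..., "title": ...}}
```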
@ -73,6 +126,17 @@ def update(current_dict: Any, updating_dict: Any) -> Dict: # noqa: CCR001
def update_dict_from_list(curr_dict: Dict, list_to_update: Union[List, Set]) -> Dict: def update_dict_from_list(curr_dict: Dict, list_to_update: Union[List, Set]) -> Dict:
"""
Converts the list into a dictionary and then performs a special update, where
nested keys that are sets or dicts are combined rather than overwritten.
:param curr_dict: dict to update
:type curr_dict: Dict
:param list_to_update: list with values to update the dict
:type list_to_update: List[str]
:return: updated dict
:rtype: Dict
"""
updated_dict = copy.copy(curr_dict) updated_dict = copy.copy(curr_dict)
dict_to_update = translate_list_to_dict(list_to_update) dict_to_update = translate_list_to_dict(list_to_update)
update(updated_dict, dict_to_update) update(updated_dict, dict_to_update)
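Combining the two sketches above gives the list-driven update; as in the hunk, `copy.copy` keeps the top level of the original dict untouched.

```python
import copy
from typing import Dict, List


def update_dict_from_list_sketch(curr_dict: Dict, list_to_update: List[str]) -> Dict:
    # translate the list into a nested dict, then merge without clobbering nested keys
    updated_dict = copy.copy(curr_dict)
    deep_update_sketch(updated_dict, translate_list_to_dict_sketch(list_to_update))
    return updated_dict


assert update_dict_from_list_sketch({"tracks": {"album": ...}}, ["tracks__title"]) == {
    "tracks": {"album": ..., "title": ...}
}
```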
@ -82,6 +146,25 @@ def update_dict_from_list(curr_dict: Dict, list_to_update: Union[List, Set]) ->
def extract_nested_models( # noqa: CCR001 def extract_nested_models( # noqa: CCR001
model: "Model", model_type: Type["Model"], select_dict: Dict, extracted: Dict model: "Model", model_type: Type["Model"], select_dict: Dict, extracted: Dict
) -> None: ) -> None:
"""
Iterates over model relations and extracts all nested models from select_dict,
putting them into the corresponding list under the relation name in the extracted dict.
Basically flattens all relations into a dictionary of related models: it can be
run on several models to extract all of their children into a dictionary of lists
with child models.
Also goes into nested relations if needed (as specified in select_dict).
:param model: parent Model
:type model: Model
:param model_type: parent model class
:type model_type: Type[Model]
:param select_dict: dictionary of related models from select_related
:type select_dict: Dict
:param extracted: dictionary with already extracted models
:type extracted: Dict
"""
follow = [rel for rel in model_type.extract_related_names() if rel in select_dict] follow = [rel for rel in model_type.extract_related_names() if rel in select_dict]
for related in follow: for related in follow:
child = getattr(model, related) child = getattr(model, related)
@ -108,6 +191,22 @@ def extract_models_to_dict_of_lists(
select_dict: Dict, select_dict: Dict,
extracted: Dict = None, extracted: Dict = None,
) -> Dict: ) -> Dict:
"""
Receives a list of models and extracts all of the children and their children
into a dictionary of lists with child models, flattening the structure into one dict
with all child models under their relation keys.
:param model_type: parent model class
:type model_type: Type[Model]
:param models: list of models from which related models should be extracted.
:type models: List[Model]
:param select_dict: dictionary of related models from select_related
:type select_dict: Dict
:param extracted: dictionary with already extracted models
:type extracted: Dict
:return: dictionary of lists of related models
:rtype: Dict
"""
if not extracted: if not extracted:
extracted = dict() extracted = dict()
for model in models: for model in models:
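The flattening that `extract_nested_models` and `extract_models_to_dict_of_lists` perform can be pictured with plain dataclasses instead of ormar models; this conceptual sketch covers only a single, non-nested relation, and all names in it are ours.

```python
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class AlbumData:
    name: str


@dataclass
class TrackData:
    title: str
    album: AlbumData


def flatten_relations_sketch(tracks: List[TrackData]) -> Dict[str, list]:
    # collect every related object under its relation name, one flat list per relation
    extracted: Dict[str, list] = {"album": []}
    for track in tracks:
        if track.album not in extracted["album"]:
            extracted["album"].append(track.album)
    return extracted


album = AlbumData(name="Fantasies")
tracks = [TrackData("Help I'm Alive", album), TrackData("Gold Guns Girls", album)]
assert flatten_relations_sketch(tracks) == {"album": [album]}
```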

View File

@ -1,3 +1,7 @@
"""
Package handles relations on models, returning related models on calls and exposing
QuerySetProxy for m2m and reverse relations.
"""
from ormar.relations.alias_manager import AliasManager from ormar.relations.alias_manager import AliasManager
from ormar.relations.relation import Relation, RelationType from ormar.relations.relation import Relation, RelationType
from ormar.relations.relation_manager import RelationsManager from ormar.relations.relation_manager import RelationsManager

View File

@ -11,11 +11,25 @@ if TYPE_CHECKING: # pragma: no cover
def get_table_alias() -> str: def get_table_alias() -> str:
"""
Creates a random string that is used to alias tables in joins.
It's necessary that each relation has its own alias, because you can link
to the same target table from multiple fields on one model as well as from
multiple different models in one join.
:return: randomly generated alias
:rtype: str
"""
alias = "".join(choices(string.ascii_uppercase, k=2)) + uuid.uuid4().hex[:4] alias = "".join(choices(string.ascii_uppercase, k=2)) + uuid.uuid4().hex[:4]
return alias.lower() return alias.lower()
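Run standalone, the generator above yields six-character aliases; here it is restated as a runnable sketch so the shape of the output is visible.

```python
import string
import uuid
from random import choices


def get_table_alias_sketch() -> str:
    # two random uppercase letters plus four hex digits, lower-cased
    alias = "".join(choices(string.ascii_uppercase, k=2)) + uuid.uuid4().hex[:4]
    return alias.lower()


print(get_table_alias_sketch())  # e.g. "qz4e1b"
```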
class AliasManager: class AliasManager:
"""
Keeps all aliases of relations between different tables.
One global instance is shared between all models.
"""
def __init__(self) -> None: def __init__(self) -> None:
self._aliases: Dict[str, str] = dict() self._aliases: Dict[str, str] = dict()
self._aliases_new: Dict[str, str] = dict() self._aliases_new: Dict[str, str] = dict()
@ -24,6 +38,22 @@ class AliasManager:
def prefixed_columns( def prefixed_columns(
alias: str, table: sqlalchemy.Table, fields: List = None alias: str, table: sqlalchemy.Table, fields: List = None
) -> List[text]: ) -> List[text]:
"""
Creates a list of aliased sqlalchemy text clauses from a
string alias and a sqlalchemy.Table.
An optional list of fields to include can be passed to extract only those columns.
The list has to contain the sqlalchemy column names (ormar aliases), not the ormar ones.
:param alias: alias of given table
:type alias: str
:param table: table from which fields should be aliased
:type table: sqlalchemy.Table
:param fields: fields to include
:type fields: Optional[List[str]]
:return: list of sqlalchemy text clauses with "column name as aliased name"
:rtype: List[text]
"""
alias = f"{alias}_" if alias else "" alias = f"{alias}_" if alias else ""
all_columns = ( all_columns = (
table.columns table.columns
@ -37,11 +67,49 @@ class AliasManager:
@staticmethod @staticmethod
def prefixed_table_name(alias: str, name: str) -> text: def prefixed_table_name(alias: str, name: str) -> text:
"""
Creates a text clause with the table name followed by its aliased name.
:param alias: alias of given table
:type alias: str
:param name: table name
:type name: str
:return: sqlalchemy text clause as "table_name aliased_name"
:rtype: sqlalchemy text clause
"""
return text(f"{name} {alias}_{name}") return text(f"{name} {alias}_{name}")
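The shape of the aliased clauses can be shown with a plain sqlalchemy table; this is a simplified restatement of the two helpers above, not the exact ormar output, and the alias value is made up.

```python
import sqlalchemy
from sqlalchemy import text

metadata = sqlalchemy.MetaData()
tracks = sqlalchemy.Table(
    "tracks",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("title", sqlalchemy.String(100)),
)

alias = "ab12cd"  # the shape of value get_table_alias() produces
# each column is exposed as "<column> as <alias>_<column>" so joined tables never clash
prefixed_columns = [text(f"{column.name} as {alias}_{column.name}") for column in tracks.columns]
prefixed_table = text(f"{tracks.name} {alias}_{tracks.name}")

print([str(clause) for clause in prefixed_columns])  # ['id as ab12cd_id', 'title as ab12cd_title']
print(str(prefixed_table))                           # tracks ab12cd_tracks
```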
def add_relation_type_new( def add_relation_type(
self, source_model: Type["Model"], relation_name: str, is_multi: bool = False self,
source_model: Type["Model"],
relation_name: str,
reverse_name: str = None,
is_multi: bool = False,
) -> None: ) -> None:
"""
Registers the relations defined in ormar models.
Given a relation, it also registers the reverse side of that relation.
Used by both ForeignKey and ManyToMany relations.
Each relation is registered under the model name and relation name.
Each registered alias has to be unique.
Aliases are used to construct joins to ensure proper links between tables.
That way you can link to the same target table from multiple fields
on one model as well as from multiple different models in one join.
:param source_model: model with relation defined
:type source_model: source Model
:param relation_name: name of the relation to define
:type relation_name: str
:param reverse_name: name of the related_name of the given relation, used for m2m relations
:type reverse_name: Optional[str]
:param is_multi: flag if relation being registered is a through m2m model
:type is_multi: bool
:return: none
:rtype: None
"""
parent_key = f"{source_model.get_name()}_{relation_name}" parent_key = f"{source_model.get_name()}_{relation_name}"
if parent_key not in self._aliases_new: if parent_key not in self._aliases_new:
self._aliases_new[parent_key] = get_table_alias() self._aliases_new[parent_key] = get_table_alias()
@ -49,15 +117,24 @@ class AliasManager:
child_model = to_field.to child_model = to_field.to
related_name = to_field.related_name related_name = to_field.related_name
if not related_name: if not related_name:
related_name = child_model.resolve_relation_name( related_name = reverse_name if is_multi else source_model.get_name() + "s"
child_model, source_model, explicit_multi=is_multi
)
child_key = f"{child_model.get_name()}_{related_name}" child_key = f"{child_model.get_name()}_{related_name}"
if child_key not in self._aliases_new: if child_key not in self._aliases_new:
self._aliases_new[child_key] = get_table_alias() self._aliases_new[child_key] = get_table_alias()
def resolve_relation_join_new( def resolve_relation_alias(
self, from_model: Type["Model"], relation_name: str self, from_model: Type["Model"], relation_name: str
) -> str: ) -> str:
"""
Given a model and a relation name, returns the alias for this relation.
:param from_model: model with relation defined
:type from_model: source Model
:param relation_name: name of the relation field
:type relation_name: str
:return: alias of the relation
:rtype: str
"""
alias = self._aliases_new.get(f"{from_model.get_name()}_{relation_name}", "") alias = self._aliases_new.get(f"{from_model.get_name()}_{relation_name}", "")
return alias return alias
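To tie the two methods together, here is a dictionary-based stand-in showing how relation keys map to aliases and how both sides of a relation get registered. The simplified signature and the counter-based aliases are ours, not ormar's.

```python
from itertools import count
from typing import Dict

_alias_counter = count()


class AliasManagerSketch:
    """Minimal stand-in: maps '<model>_<relation>' keys to unique aliases."""

    def __init__(self) -> None:
        self._aliases_new: Dict[str, str] = {}

    def add_relation_type(
        self, source_name: str, relation_name: str, child_name: str, reverse_name: str
    ) -> None:
        # both directions of the relation get their own alias, registered once
        for key in (f"{source_name}_{relation_name}", f"{child_name}_{reverse_name}"):
            self._aliases_new.setdefault(key, f"alias{next(_alias_counter)}")

    def resolve_relation_alias(self, from_model_name: str, relation_name: str) -> str:
        return self._aliases_new.get(f"{from_model_name}_{relation_name}", "")


manager = AliasManagerSketch()
manager.add_relation_type("track", "album", "album", "tracks")
assert manager.resolve_relation_alias("track", "album").startswith("alias")
assert manager.resolve_relation_alias("album", "tracks").startswith("alias")
assert manager.resolve_relation_alias("album", "missing") == ""
```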

Some files were not shown because too many files have changed in this diff.