Add link Table API #1374
Changes from 16 commits
@@ -0,0 +1,67 @@
from alembic.operations import Operations
from alembic.migration import MigrationContext
from sqlalchemy import MetaData

from db.columns.base import MathesarColumn
from db.constraints.utils import naming_convention
from db.tables.operations.select import reflect_table_from_oid, reflect_tables_from_oids
from db.tables.utils import get_primary_key_column


def create_foreign_key_link(engine, schema, column_name, referrer_table_oid, referent_table_oid, unique_link=False):
    with engine.begin() as conn:
        referent_table = reflect_table_from_oid(referent_table_oid, engine, conn)
        referrer_table = reflect_table_from_oid(referrer_table_oid, engine, conn)
        primary_key_column = get_primary_key_column(referent_table)
        metadata = MetaData(bind=engine, schema=schema, naming_convention=naming_convention)
        opts = {
            'target_metadata': metadata
        }
        ctx = MigrationContext.configure(conn, opts=opts)
        op = Operations(ctx)
        column = MathesarColumn(
            column_name, primary_key_column.type
        )
        op.add_column(referrer_table.name, column, schema=schema)
        if unique_link:
            op.create_unique_constraint(None, referrer_table.name, [column_name], schema=schema)
        op.create_foreign_key(
            None,
            referrer_table.name,
            referent_table.name,
            [column.name],
            [primary_key_column.name],
            source_schema=schema,
            referent_schema=schema
        )


def create_many_to_many_link(engine, schema, map_table_name, referents):
    with engine.begin() as conn:
        referent_tables_oid = [referent['referent_table'] for referent in referents]
        referent_tables = reflect_tables_from_oids(referent_tables_oid, engine, conn)
        metadata = MetaData(bind=engine, schema=schema, naming_convention=naming_convention)
        opts = {
            'target_metadata': metadata
        }
        ctx = MigrationContext.configure(conn, opts=opts)
        op = Operations(ctx)
        op.create_table(map_table_name, schema=schema)
Reviewer (on the op.create_table call above): I think you should use the standard Mathesar function for creating a table. If nothing else, we'll want the default columns so this fits into the rest of the framework, correct?

Author:

def test_table_creation(engine_with_schema):
    engine, schema = engine_with_schema
    supported_types = get_supported_alter_column_types(
        engine, friendly_names=False,
    )
    sa_type = supported_types.get("INTEGER")
    metadata = MetaData(bind=engine, schema=schema, naming_convention=naming_convention)
    with engine.begin() as conn:
        metadata.reflect()
        opts = {
            'target_metadata': metadata
        }
        ctx = MigrationContext.configure(conn, opts=opts)
        referent_column = MathesarColumn(
            "reference",
            sa_type()
        )
        target_table = create_mathesar_table(
            "target_table",
            schema,
            [referent_column],
            engine,
            metadata,
        )
        fk_column = MathesarColumn(
            "fk_col",
            sa_type()
        )
        base_table = create_mathesar_table(
            "base_table",
            schema,
            [fk_column],
            engine,
            metadata
        )
        op = Operations(ctx)
        # op.create_unique_constraint(None, target_table.name, [referent_column.name], schema=schema)
        try:
            op.create_foreign_key(
                None, base_table.name,
                target_table.name,
                [fk_column.name], [referent_column.name], source_schema=schema, referent_schema=schema
            )
        except:
            pass
    schema_obj = reflect_schema(engine, schema)
    tables = get_table_oids_from_schema(schema_obj.oid, engine)
    # Tables should not be created, as the foreign key creation step fails because the referent column lacks a unique constraint
    assert len(tables) == 0

The transaction fails if the line at https://github.com/centerofci/mathesar/blob/b2467dcf63de786c0e8063821ff317ef608ec3a6/db/tables/operations/create.py#L28 is executed.

Reviewer: Argh, what a pain. I think it would still be better to use the create_mathesar_table function. I really want to avoid ad-hoc table creation calls scattered through the codebase, since we already have a default column involved and we're going to add more default metadata, etc. over time.
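A minimal sketch of the reviewer's suggestion, for illustration only (the diff for create_many_to_many_link continues right below): it assumes create_mathesar_table has the signature used in the test above and the module path shown in the permalink, and per the thread it currently fails inside the already-open transaction, so treat it as a direction rather than working code.

from db.tables.operations.create import create_mathesar_table  # path taken from the permalink above

def create_many_to_many_link_via_helper(engine, schema, map_table_name, referents):
    # Illustrative variant (name invented here); reuses the imports from the file above.
    with engine.begin() as conn:
        metadata = MetaData(bind=engine, schema=schema, naming_convention=naming_convention)
        ctx = MigrationContext.configure(conn, opts={'target_metadata': metadata})
        op = Operations(ctx)
        # Use the standard helper instead of op.create_table so the mapping table
        # gets Mathesar's default columns; per the discussion above this currently
        # breaks the open transaction.
        create_mathesar_table(map_table_name, schema, [], engine, metadata)
        # ...per-referent column and foreign key creation would follow, as in the diff.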
        for referent in referents:
            referent_table_oid = referent['referent_table']
            referent_table = referent_tables[referent_table_oid]
            col_name = referent['column_name']
            primary_key_column = get_primary_key_column(referent_table)
            column = MathesarColumn(
                col_name, primary_key_column.type
            )
            op.add_column(map_table_name, column, schema=schema)
            op.create_foreign_key(
                None,
                map_table_name,
                referent_table.name,
                [column.name],
                [primary_key_column.name],
                source_schema=schema,
                referent_schema=schema
            )
Reviewer: Why not use your create_foreign_key_link function here?

Author: It seemed redundant, as both signatures are the same, but I agree it would be better to use a common API.

Reviewer: In that case, consider extracting the common code from the two functions and using it as a private method.
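One possible shape for that refactor, sketched only as an illustration: the helper name _create_link_column is invented here, and it reuses MathesarColumn and get_primary_key_column from the file above. Both create_foreign_key_link and the per-referent loop in create_many_to_many_link could then delegate to it.

def _create_link_column(op, schema, referrer_table_name, referent_table, column_name, unique_link=False):
    # Shared core of the two public functions: add a column typed after the
    # referent's primary key, then create the constraints on it.
    primary_key_column = get_primary_key_column(referent_table)
    column = MathesarColumn(column_name, primary_key_column.type)
    op.add_column(referrer_table_name, column, schema=schema)
    if unique_link:
        op.create_unique_constraint(None, referrer_table_name, [column_name], schema=schema)
    op.create_foreign_key(
        None,
        referrer_table_name,
        referent_table.name,
        [column.name],
        [primary_key_column.name],
        source_schema=schema,
        referent_schema=schema
    )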
@@ -0,0 +1,12 @@
from rest_framework.mixins import CreateModelMixin, ListModelMixin
from rest_framework.viewsets import GenericViewSet
from mathesar.api.pagination import DefaultLimitOffsetPagination
from mathesar.api.serializers.links import LinksMappingSerializer


class LinkViewSet(CreateModelMixin, ListModelMixin, GenericViewSet):
    serializer_class = LinksMappingSerializer
    pagination_class = DefaultLimitOffsetPagination

    def get_queryset(self):
        return []
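The URL configuration is not part of this diff; a registration roughly like the following (router module, import path, and basename are guesses) would expose the /api/db/v0/links/ endpoint that the tests below exercise.

from rest_framework.routers import DefaultRouter

from mathesar.api.db.viewsets.links import LinkViewSet  # import path assumed, not shown in the diff

router = DefaultRouter()
# basename is required because the viewset defines get_queryset() dynamically
# instead of a queryset attribute the router could inspect.
router.register(r'links', LinkViewSet, basename='links')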
@@ -0,0 +1,83 @@
from rest_framework import serializers

from db.links.operations.create import create_foreign_key_link, create_many_to_many_link
from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
from mathesar.api.exceptions.validation_exceptions.exceptions import ColumnSizeMismatchAPIException
from mathesar.api.serializers.shared_serializers import (
    MathesarPolymorphicErrorMixin,
    ReadWritePolymorphicSerializerMappingMixin,
)
from mathesar.models import Table


class OneToOneSerializer(MathesarErrorMessageMixin, serializers.Serializer):
    referent_column_name = serializers.CharField()
    reference_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
    referent_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())

    def is_link_unique(self):
        return True

    def create(self, validated_data):
        reference_table = validated_data['reference_table']
        create_foreign_key_link(
            reference_table.schema._sa_engine,
            reference_table._sa_table.schema,
            validated_data.get('referent_column_name'),
            reference_table.oid,
            validated_data.get('referent_table').oid,
            unique_link=self.is_link_unique()
        )
        return validated_data


class OneToManySerializer(OneToOneSerializer):

    def is_link_unique(self):
        return False


class MapColumnSerializer(serializers.Serializer):
    column_name = serializers.CharField()
    referent_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())


class ManyToManySerializer(MathesarErrorMessageMixin, serializers.Serializer):
    referents = MapColumnSerializer(many=True)
    mapping_table_name = serializers.CharField()

    def create(self, validated_data):
        referents = validated_data['referents']
        referent_tables_oid = [
            {'referent_table': map_table_obj['referent_table'].oid, 'column_name': map_table_obj['column_name']} for
            map_table_obj in validated_data['referents']
        ]
        create_many_to_many_link(
            referents[0]['referent_table'].schema._sa_engine,
            referents[0]['referent_table']._sa_table.schema,
            validated_data.get('mapping_table_name'),
            referent_tables_oid,
        )
        return validated_data


class LinksMappingSerializer(
    MathesarPolymorphicErrorMixin,
    ReadWritePolymorphicSerializerMappingMixin,
    serializers.Serializer
):
    def create(self, validated_data):
        serializer = self.serializers_mapping.get(self.get_mapping_field())
        return serializer.create(validated_data)

    serializers_mapping = {
        "one-to-one": OneToOneSerializer,
        "one-to-many": OneToManySerializer,
        "many-to-many": ManyToManySerializer
    }
    link_type = serializers.CharField(required=True)

    def get_mapping_field(self):
        link_type = self.initial_data.get('link_type', None)
        if link_type is None:
            raise ColumnSizeMismatchAPIException()
        return link_type
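As a usage illustration (values are placeholders mirroring the tests below, and the exact delegation of validation depends on ReadWritePolymorphicSerializerMappingMixin, which is not shown in this diff), link_type in the incoming payload selects the concrete serializer:

payload = {
    "link_type": "one-to-many",        # key read by get_mapping_field()
    "referent_column_name": "col_1",   # placeholder column name
    "reference_table": 2,              # placeholder Table primary keys
    "referent_table": 1,
}
serializer = LinksMappingSerializer(data=payload)
# get_mapping_field() returns "one-to-many", so serializers_mapping resolves to
# OneToManySerializer and create() is delegated to it.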
@@ -0,0 +1,115 @@
import pytest
from sqlalchemy import Column, Integer, MetaData, String
from sqlalchemy import Table as SATable
from django.core.cache import cache

from db.tables.operations.select import get_oid_from_table
from db.tests.types import fixtures
from mathesar import models

engine_with_types = fixtures.engine_with_types
engine_email_type = fixtures.engine_email_type
temporary_testing_schema = fixtures.temporary_testing_schema


@pytest.fixture
def column_test_table(patent_schema):
    engine = patent_schema._sa_engine
    column_list_in = [
        Column("mycolumn0", Integer, primary_key=True),
        Column("mycolumn1", Integer, nullable=False),
        Column("mycolumn2", Integer, server_default="5"),
        Column("mycolumn3", String),
    ]
    db_table = SATable(
        "anewtable",
        MetaData(bind=engine),
        *column_list_in,
        schema=patent_schema.name
    )
    db_table.create()
    db_table_oid = get_oid_from_table(db_table.name, db_table.schema, engine)
    table = models.Table.current_objects.create(oid=db_table_oid, schema=patent_schema)
    return table


def test_one_to_one_link_create(column_test_table, client, create_table):
    cache.clear()
    table_2 = create_table('Table 2')
    data = {
        "link_type": "one-to-one",
        "referent_column_name": "col_1",
        "reference_table": table_2.id,
        "referent_table": column_test_table.id,
    }
    response = client.post(
        "/api/db/v0/links/",
        data=data,
    )
    assert response.status_code == 201


def test_one_to_many_link_create(column_test_table, client, create_table):
    cache.clear()
    table_2 = create_table('Table 2')
    data = {
        "link_type": "one-to-many",
        "referent_column_name": "col_1",
        "reference_table": table_2.id,
        "referent_table": column_test_table.id,
    }
    response = client.post(
        "/api/db/v0/links/",
        data=data,
    )
    assert response.status_code == 201


def test_one_to_many_self_referential_link_create(column_test_table, client):
    cache.clear()
    data = {
        "link_type": "one-to-many",
        "referent_column_name": "col_1",
        "reference_table": column_test_table.id,
        "referent_table": column_test_table.id,
    }
    response = client.post(
        "/api/db/v0/links/",
        data=data,
    )
    assert response.status_code == 201


def test_many_to_many_self_referential_link_create(column_test_table, client):
    cache.clear()
    data = {
        "link_type": "many-to-many",
        "mapping_table_name": "map_table",
        "referents": [
            {'referent_table': column_test_table.id, 'column_name': "link_1"},
            {'referent_table': column_test_table.id, 'column_name': "link_2"}
        ],
    }
    response = client.post(
        "/api/db/v0/links/",
        data=data,
    )
    assert response.status_code == 201


def test_many_to_many_link_create(column_test_table, client, create_table):
    cache.clear()
    table_2 = create_table('Table 2')
    data = {
        "link_type": "many-to-many",
        "mapping_table_name": "map_table",
        "referents": [
            {'referent_table': column_test_table.id, 'column_name': "link_1"},
            {'referent_table': table_2.id, 'column_name': "link_2"}
        ],
    }
    response = client.post(
        "/api/db/v0/links/",
        data=data,
    )
    assert response.status_code == 201
Reviewer: Unless there's a convincing reason not to, I think you should create the column with the unique constraint and foreign key included, then add that column to the table. This would make this simpler and more aligned with the foreign key creation in db/tables/operations/split.py. I can think of no reason to do it in separate operations, given that we're not supporting multi-column foreign key creation with this.

Author: Isn't creating a foreign key constraint in individual steps much more flexible, since it could allow for multiple columns, even though we don't use that now? Does creating a foreign key as part of the column bring any advantage?

Reviewer: I think the main advantage is that it's easier to see the logic. I acknowledge, however, that it's more flexible in separate steps. If you want to keep separate steps, I can go with it.
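To make the trade-off concrete, here is a rough sketch of the single-step approach under discussion. It uses a plain SQLAlchemy Column as a stand-in, since this diff doesn't show whether MathesarColumn forwards ForeignKey arguments; the helper name is invented, and get_primary_key_column comes from the first file above.

from sqlalchemy import Column, ForeignKey

def _add_link_column_inline(op, schema, referrer_table_name, referent_table, column_name, unique_link=False):
    # Sketch only: attach the foreign key (and uniqueness) to the column definition,
    # so a single add_column call sets up the link.
    primary_key_column = get_primary_key_column(referent_table)
    column = Column(
        column_name,
        primary_key_column.type,
        ForeignKey(f"{schema}.{referent_table.name}.{primary_key_column.name}"),
        unique=unique_link,
    )
    op.add_column(referrer_table_name, column, schema=schema)
    # Depending on how Alembic treats the unique flag on added columns, the unique
    # constraint may still need an explicit op.create_unique_constraint call.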