diff --git a/README.md b/README.md
index 1a3cf5f0..68796f72 100644
--- a/README.md
+++ b/README.md
@@ -32,20 +32,6 @@ $ ./manage.py migrate
sudo systemctl restart netbox netbox-rq
```
-> [!NOTE]
-> If you are using NetBox Custom Objects with NetBox Branching, you need to insert the following into your `configuration.py`. See the docs for a full description of how NetBox Custom Objects currently works with NetBox Branching.
-
-```
-PLUGINS_CONFIG = {
- 'netbox_branching': {
- 'exempt_models': [
- 'netbox_custom_objects.customobjecttype',
- 'netbox_custom_objects.customobjecttypefield',
- ],
- },
-}
-```
-
## Known Limitations
NetBox Custom Objects is now Generally Available which means you can use it in production and migrations to future versions will work. There are many upcoming features including GraphQL support - the best place to see what's on the way is the [issues](https://github.com/netboxlabs/netbox-custom-objects/issues) list on the GitHub repository.
diff --git a/netbox_custom_objects/__init__.py b/netbox_custom_objects/__init__.py
index af41097c..b1ec966e 100644
--- a/netbox_custom_objects/__init__.py
+++ b/netbox_custom_objects/__init__.py
@@ -32,6 +32,89 @@ def _migration_finished(sender, **kwargs):
_migrations_checked = None
+def _patch_get_serializer_for_model():
+ """
+ Patch utilities.api.get_serializer_for_model to handle dynamically-generated
+ custom object models.
+
+ The default implementation resolves serializers by import path convention
+ (e.g. netbox_custom_objects.api.serializers.Table1ModelSerializer). Dynamic
+ models have no importable serializer at that path, so the call raises
+ SerializerNotFound. This patch intercepts the lookup for APP_LABEL models and
+ delegates to get_serializer_class(), which generates the serializer on the fly.
+ """
+ import utilities.api as _api_utils
+
+ _original = _api_utils.get_serializer_for_model
+
+ def _patched(model, prefix=''):
+ # Only intercept dynamically-generated custom object models (Table1Model,
+ # Table2Model, …) identified by their Table{n}Model name pattern.
+ # CustomObjectType and CustomObjectTypeField live in the same app but
+ # have importable serializers and must go through the normal path.
+ if getattr(model, '_meta', None) and model._meta.app_label == APP_LABEL \
+ and extract_cot_id_from_model_name(model.__name__.lower()) is not None:
+ from netbox_custom_objects.api.serializers import get_serializer_class
+ return get_serializer_class(model)
+ return _original(model, prefix=prefix)
+
+ _api_utils.get_serializer_for_model = _patched
+
+ # Also patch the reference already imported into extras.events (and anywhere
+ # else that did `from utilities.api import get_serializer_for_model` before
+ # our patch ran).
+ try:
+ import extras.events as _extras_events
+ _extras_events.get_serializer_for_model = _patched
+ except (ImportError, AttributeError):
+ pass
+
+
+def _patch_check_object_accessible_in_branch():
+ """
+ Patch check_object_accessible_in_branch to use an existence check instead of
+ a full SELECT for custom object models.
+
+ The original implementation does model.objects.get(pk=object_id) which issues
+ SELECT * including every custom field column. If a field was renamed in the
+ branch but the stable db_column is not yet reflected in the model (e.g. due to
+ a stale cache), this can raise ProgrammingError. For custom objects we only
+ need to know whether the row exists, so filter(pk=...).exists() is sufficient
+ and avoids referencing any column other than the primary key.
+ """
+ try:
+ import netbox_branching.signal_receivers as _sr
+ from netbox_branching.utilities import deactivate_branch
+ from netbox_branching.models import ChangeDiff
+ from core.choices import ObjectChangeActionChoices
+ from django.contrib.contenttypes.models import ContentType
+
+ _original = _sr.check_object_accessible_in_branch
+
+ def _patched(branch, model, object_id):
+ if model._meta.app_label != APP_LABEL:
+ return _original(branch, model, object_id)
+
+ # Check existence in main using only the pk — avoids SELECT on
+ # renamed columns that may not yet exist in main.
+ with deactivate_branch():
+ if model.objects.filter(pk=object_id).exists():
+ return True
+
+ # Not in main — was it created in this branch?
+ content_type = ContentType.objects.get_for_model(model)
+ return ChangeDiff.objects.filter(
+ branch=branch,
+ object_type=content_type,
+ object_id=object_id,
+ action=ObjectChangeActionChoices.ACTION_CREATE,
+ ).exists()
+
+ _sr.check_object_accessible_in_branch = _patched
+ except (ImportError, AttributeError):
+ pass
+
+
def _patch_object_selector_view():
"""
Patch ObjectSelectorView to support dynamically-generated custom object models.
@@ -183,6 +266,14 @@ def ready(self):
# Patch ObjectSelectorView to support dynamically-generated custom object models
_patch_object_selector_view()
+ # Patch get_serializer_for_model so event rules, job serializers, etc. can
+ # resolve serializers for dynamically-generated custom object models.
+ _patch_get_serializer_for_model()
+
+ # Patch check_object_accessible_in_branch to use pk-only existence check,
+ # avoiding SELECT * which references renamed columns that may not exist in main.
+ _patch_check_object_accessible_in_branch()
+
# Suppress warnings about database calls during app initialization
with warnings.catch_warnings():
warnings.filterwarnings(
diff --git a/netbox_custom_objects/api/views.py b/netbox_custom_objects/api/views.py
index 3ce1107d..56a70cc5 100644
--- a/netbox_custom_objects/api/views.py
+++ b/netbox_custom_objects/api/views.py
@@ -17,13 +17,8 @@ class ETagMixin: # pragma: no cover – NetBox < 4.6 shim
from netbox_custom_objects.filtersets import get_filterset_class
from netbox_custom_objects.models import CustomObjectType, CustomObjectTypeField
-from netbox_custom_objects.utilities import is_in_branch
-
from . import serializers
-# Constants
-BRANCH_ACTIVE_ERROR_MESSAGE = _("Please switch to the main branch to perform this operation.")
-
class RootView(APIRootView):
def get_view_name(self):
@@ -77,14 +72,9 @@ def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
- if is_in_branch():
- raise ValidationError(BRANCH_ACTIVE_ERROR_MESSAGE)
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
- if is_in_branch():
- raise ValidationError(BRANCH_ACTIVE_ERROR_MESSAGE)
-
# Replicate DRF's UpdateModelMixin.update() so we can snapshot the instance
# before the serializer is constructed. Calling super().update() would invoke
# get_object() a second time and return a fresh, un-snapshotted instance.
diff --git a/netbox_custom_objects/field_types.py b/netbox_custom_objects/field_types.py
index 3b881f2c..df3ded23 100644
--- a/netbox_custom_objects/field_types.py
+++ b/netbox_custom_objects/field_types.py
@@ -987,11 +987,12 @@ def after_model_generation(self, instance, model, field_name):
target_field.remote_field.model = to_model
target_field.related_model = to_model
- def create_m2m_table(self, instance, model, field_name):
+ def create_m2m_table(self, instance, model, field_name, schema_conn=None):
"""
Creates the actual M2M table after models are fully generated
"""
- from django.db import connection
+ from django.db import connection as default_connection
+ connection = schema_conn if schema_conn is not None else default_connection
# Get the field instance
field = model._meta.get_field(field_name)
diff --git a/netbox_custom_objects/filtersets.py b/netbox_custom_objects/filtersets.py
index 740e2075..f5cbced5 100644
--- a/netbox_custom_objects/filtersets.py
+++ b/netbox_custom_objects/filtersets.py
@@ -3,6 +3,7 @@
from django.contrib.postgres.fields import ArrayField
from django.db.models import JSONField, Q
from django.utils.dateparse import parse_date, parse_datetime
+from django.utils.timezone import make_aware, is_aware
from extras.choices import CustomFieldTypeChoices
from netbox.filtersets import NetBoxModelFilterSet
@@ -89,6 +90,8 @@ def search(self, queryset, name, value):
elif field.type == CustomFieldTypeChoices.TYPE_DATETIME:
parsed = parse_datetime(value)
if parsed is not None:
+ if not is_aware(parsed):
+ parsed = make_aware(parsed)
q |= Q(**{f"{field.name}__exact": parsed})
if not q:
return queryset.none()
diff --git a/netbox_custom_objects/forms.py b/netbox_custom_objects/forms.py
index 0642ca34..1157c231 100644
--- a/netbox_custom_objects/forms.py
+++ b/netbox_custom_objects/forms.py
@@ -176,6 +176,7 @@ class CustomObjectTypeFieldForm(CustomFieldForm):
class Meta:
model = CustomObjectTypeField
fields = "__all__"
+ exclude = ('db_column',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
diff --git a/netbox_custom_objects/migrations/0007_fix_object_field_fk_deferrable.py b/netbox_custom_objects/migrations/0007_fix_object_field_fk_deferrable.py
new file mode 100644
index 00000000..6e57b53b
--- /dev/null
+++ b/netbox_custom_objects/migrations/0007_fix_object_field_fk_deferrable.py
@@ -0,0 +1,74 @@
+"""
+Drop DEFERRABLE INITIALLY DEFERRED from FK constraints on custom object tables.
+
+Prior to this migration, _ensure_field_fk_constraint() created FK constraints
+with DEFERRABLE INITIALLY DEFERRED. That attribute causes PostgreSQL to queue
+trigger events that block subsequent ALTER TABLE calls (e.g. remove_field during
+a branch revert), raising "cannot ALTER TABLE because it has pending trigger
+events".
+
+This migration finds all DEFERRABLE FK constraints on tables whose names start
+with "custom_objects_" and recreates them as non-DEFERRABLE with ON DELETE
+CASCADE, matching the behaviour of the updated _ensure_field_fk_constraint().
+"""
+
+from django.db import migrations
+
+
+def fix_deferrable_fk_constraints(apps, schema_editor):
+ """
+ Re-create any DEFERRABLE FK constraints on custom object tables as
+ non-DEFERRABLE. Uses information_schema so no Django model loading
+ is required — safe to run during the migration pass even though dynamic
+ models are not yet registered.
+ """
+ with schema_editor.connection.cursor() as cursor:
+ # Find all DEFERRABLE FK constraints on custom_objects_* tables.
+ cursor.execute("""
+ SELECT
+ tc.table_name,
+ tc.constraint_name,
+ kcu.column_name,
+ ccu.table_name AS foreign_table_name
+ FROM information_schema.table_constraints AS tc
+ JOIN information_schema.key_column_usage AS kcu
+ ON tc.constraint_name = kcu.constraint_name
+ AND tc.table_schema = kcu.table_schema
+ JOIN information_schema.constraint_column_usage AS ccu
+ ON ccu.constraint_name = tc.constraint_name
+ AND ccu.table_schema = tc.table_schema
+ JOIN information_schema.referential_constraints AS rc
+ ON tc.constraint_name = rc.constraint_name
+ AND tc.table_schema = rc.constraint_schema
+ WHERE tc.constraint_type = 'FOREIGN KEY'
+ AND tc.table_name LIKE 'custom_objects\\_%%'
+ AND tc.is_deferrable = 'YES'
+ """)
+ rows = cursor.fetchall()
+
+ for table_name, constraint_name, column_name, foreign_table in rows:
+ new_constraint_name = f'{table_name}_{column_name}_fk_cascade'
+ cursor.execute(
+ f'ALTER TABLE "{table_name}" DROP CONSTRAINT "{constraint_name}"'
+ )
+ cursor.execute(f"""
+ ALTER TABLE "{table_name}"
+ ADD CONSTRAINT "{new_constraint_name}"
+ FOREIGN KEY ("{column_name}")
+ REFERENCES "{foreign_table}" ("id")
+ ON DELETE CASCADE
+ """)
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('netbox_custom_objects', '0006_customobjecttypefield_related_name_and_more'),
+ ]
+
+ operations = [
+ migrations.RunPython(
+ fix_deferrable_fk_constraints,
+ migrations.RunPython.noop,
+ ),
+ ]
diff --git a/netbox_custom_objects/migrations/0008_customobjecttypefield_db_column.py b/netbox_custom_objects/migrations/0008_customobjecttypefield_db_column.py
new file mode 100644
index 00000000..70ff4917
--- /dev/null
+++ b/netbox_custom_objects/migrations/0008_customobjecttypefield_db_column.py
@@ -0,0 +1,49 @@
+"""
+Add ``db_column`` to CustomObjectTypeField and back-fill it from ``name``.
+
+``db_column`` is frozen at field creation time so that subsequent renames are
+pure metadata operations — the physical database column name never changes.
+This prevents cross-schema column-name mismatches when a field is renamed in
+one schema (e.g. a branch) and the model is then used to query a different
+schema (e.g. main) that still has the original column name.
+
+The data migration sets ``db_column = name`` for all existing fields so that
+``effective_db_column`` returns the same value as before the migration.
+"""
+
+from django.db import migrations, models
+
+
+def backfill_db_column(apps, schema_editor):
+ """Set db_column = name for all existing CustomObjectTypeField rows."""
+ CustomObjectTypeField = apps.get_model('netbox_custom_objects', 'CustomObjectTypeField')
+ CustomObjectTypeField.objects.filter(db_column='').update(db_column=models.F('name'))
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('netbox_custom_objects', '0007_fix_object_field_fk_deferrable'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='customobjecttypefield',
+ name='db_column',
+ field=models.CharField(
+ blank=True,
+ default='',
+ help_text=(
+ 'Physical database column name. Set once at creation and never changed, '
+ 'so renames are pure metadata changes that do not require DDL.'
+ ),
+ max_length=50,
+ verbose_name='database column',
+ ),
+ preserve_default=False,
+ ),
+ migrations.RunPython(
+ backfill_db_column,
+ migrations.RunPython.noop,
+ ),
+ ]
diff --git a/netbox_custom_objects/models.py b/netbox_custom_objects/models.py
index aabb2927..ba0c27b5 100644
--- a/netbox_custom_objects/models.py
+++ b/netbox_custom_objects/models.py
@@ -1,4 +1,6 @@
+import contextvars
import decimal
+import logging
import re
import threading
from datetime import date, datetime
@@ -58,6 +60,9 @@
from netbox_custom_objects.utilities import _suppress_clear_cache, generate_model
+logger = logging.getLogger('netbox_custom_objects.models')
+
+
class UniquenessConstraintTestError(Exception):
"""Custom exception used to signal successful uniqueness constraint test."""
@@ -66,6 +71,302 @@ class UniquenessConstraintTestError(Exception):
USER_TABLE_DATABASE_NAME_PREFIX = "custom_objects_"
+# Per-context storage for CO field values deferred during squash merge.
+# Using ContextVar instead of a class-level dict so that concurrent merges
+# (different threads or coroutines) each get an isolated copy.
+# Shape: {db_table: {co_pk: {'using': alias, 'data': {field_name: value}}}}
+_deferred_co_field_data: contextvars.ContextVar[dict | None] = contextvars.ContextVar(
+ '_deferred_co_field_data', default=None
+)
+
+
+def _get_schema_connection():
+ """
+ Return the active branch's DB connection when called within a branch context,
+ otherwise return the default (main-schema) connection.
+
+ Used so that schema-editor operations (add/alter/remove column) target the
+ correct PostgreSQL schema without requiring every call-site to be branch-aware.
+ """
+ try:
+ from netbox_branching.contextvars import active_branch
+ branch = active_branch.get()
+ if branch is not None:
+ from django.db import connections
+ return connections[branch.connection_name]
+ except ImportError:
+ pass
+ return connection
+
+
+def _apply_deferred_co_field(field_instance):
+ """
+ Apply any deferred CO field values after a column is added to the DB.
+
+ Called by CustomObjectTypeField.save() after schema_editor.add_field() so that
+ custom object rows inserted before their columns existed (squash merge ordering)
+ receive their correct values via a raw UPDATE.
+
+ ``_deferred_co_field_data`` (ContextVar) has the shape::
+
+ {db_table: {co_pk: {'using': alias, 'data': {field_name: value}}}}
+
+ For TYPE_OBJECT fields the postchange_data key is ``{name}`` but the DB column
+ is ``{name}_id`` — this function maps accordingly.
+    For TYPE_MULTIOBJECT fields there is no column on the object's own table
+    (values live in a through table), so they are skipped entirely.
+ """
+ from extras.choices import CustomFieldTypeChoices
+
+ # No deferred data at all — fast path.
+ deferred = _deferred_co_field_data.get()
+ if not deferred:
+ return
+
+ cot = field_instance.custom_object_type
+ table_name = cot.get_database_table_name()
+ per_table = deferred.get(table_name)
+ if not per_table:
+ return
+
+    # M2M values live in a through table, not a column on the object's own
+    # table — nothing to UPDATE here.
+    if field_instance.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT:
+ return
+
+ # For TYPE_OBJECT the data key is the field name but the DB column ends with _id.
+ data_key = field_instance.name
+ if field_instance.type == CustomFieldTypeChoices.TYPE_OBJECT:
+ col_name = f'{field_instance.name}_id'
+ else:
+ col_name = field_instance.name
+
+ schema_conn = _get_schema_connection()
+
+ with schema_conn.cursor() as cursor:
+ for co_pk, entry in per_table.items():
+ value = entry['data'].get(data_key)
+ if value is None:
+ continue
+ # table_name comes from get_database_table_name() (controlled by our
+ # code) and col_name from field.name, which is validated by the
+ # ^[a-z0-9_]+$ regex — no double-quote characters are possible, so
+ # the f-string interpolation is safe against SQL injection here.
+ cursor.execute(
+ f'UPDATE "{table_name}" SET "{col_name}" = %s WHERE id = %s',
+ [value, co_pk],
+ )
+
+    # Remove the consumed key from each entry so processed field data does not
+    # outlive its usefulness in the ContextVar: if the merge is retried after a
+    # partial failure, stale values from the earlier attempt are not re-applied.
+ for entry in per_table.values():
+ entry['data'].pop(data_key, None)
+
+ # Prune entries whose data dict is now exhausted.
+ exhausted = [pk for pk, entry in per_table.items() if not entry['data']]
+ for pk in exhausted:
+ del per_table[pk]
+ if not per_table:
+ del deferred[table_name]
+ if not deferred:
+ _deferred_co_field_data.set(None)
+
+
+def _schema_add_field(fi, model, schema_editor, schema_conn):
+ """
+ Issue ``add_field`` against the physical schema for *fi*.
+
+ Handles through-table creation for MULTIOBJECT fields. Does NOT apply
+ deferred CO field data — callers that need that (squash merge context) must
+ call ``_apply_deferred_co_field(fi)`` separately after this returns.
+
+ Idempotent: skips the ALTER TABLE if the column already exists (e.g. when
+ sync/merge replays an ObjectChange that was already applied).
+ """
+ ft = FIELD_TYPE_CLASS[fi.type]()
+ mf = ft.get_model_field(fi, db_column=fi.effective_db_column)
+ mf.contribute_to_class(model, fi.name)
+
+ with schema_conn.cursor() as cursor:
+ existing_cols = {
+ col.name
+ for col in schema_conn.introspection.get_table_description(cursor, model._meta.db_table)
+ }
+ if mf.column in existing_cols:
+ logger.debug('_schema_add_field: %r already exists on %s, skipping', mf.column, model._meta.db_table)
+ return
+
+ schema_editor.add_field(model, mf)
+ if fi.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT:
+ ft.create_m2m_table(fi, model, fi.name, schema_conn=schema_conn)
+
+
+def _schema_remove_field(fi, model, schema_editor, existing_tables=None):
+ """
+ Issue ``remove_field`` against the physical schema for *fi*.
+
+ For MULTIOBJECT fields the through table is dropped first. When
+ *existing_tables* is a pre-fetched list only tables present in it are
+ dropped; when it is ``None`` (main-schema context) the drop is always
+ attempted.
+
+ Always issues ``SET CONSTRAINTS ALL IMMEDIATE`` before ``remove_field`` to
+ flush any DEFERRABLE FK trigger events that would otherwise cause PostgreSQL
+ to reject the subsequent ALTER TABLE.
+ """
+ ft = FIELD_TYPE_CLASS[fi.type]()
+ mf = ft.get_model_field(fi, db_column=fi.effective_db_column)
+ mf.contribute_to_class(model, fi.name)
+
+ if fi.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT:
+ through_table = fi.through_table_name
+ if existing_tables is None or through_table in existing_tables:
+ through_meta = type(
+ 'Meta', (),
+ {'db_table': through_table, 'app_label': APP_LABEL, 'managed': True},
+ )
+ through_model = type(
+ f'_TempThrough_{through_table}',
+ (models.Model,),
+ {'Meta': through_meta, '__module__': 'netbox_custom_objects.models'},
+ )
+ schema_editor.delete_model(through_model)
+
+ # Flush any pending DEFERRABLE FK trigger events before ALTER TABLE;
+ # otherwise PostgreSQL raises "pending trigger events" when removing a FK field.
+ schema_editor.execute('SET CONSTRAINTS ALL IMMEDIATE')
+ schema_editor.remove_field(model, mf)
+
+
+def _schema_alter_field(old_fi, new_fi, model, schema_editor, schema_conn, existing_tables=None):
+ """
+ Issue ``alter_field`` against the physical schema, updating *old_fi* to *new_fi*.
+
+ For MULTIOBJECT fields whose name changes the through table is renamed before
+ ``alter_field`` is called. When the old through table is absent (e.g. the
+ branch has never seen this field) the new through table is created from scratch
+ instead.
+
+ *existing_tables* — optional pre-fetched table name list from the target
+ connection. When given, through-table operations are guarded by membership
+ checks. When ``None`` (main-schema context) the schema_conn is introspected
+ once on demand.
+
+ Idempotent: skips the ALTER TABLE if the old column is already gone and the
+ new column already exists (e.g. when sync/merge replays an ObjectChange that
+ was already applied).
+
+ Conflict resolution: when neither the old nor the new column exists (the field
+ was independently renamed in the target schema — e.g. branch renamed A→X while
+ main renamed A→Y), the live field record is looked up from the target schema to
+ find the actual current column name, which is then renamed to the new target.
+ A warning is logged to flag the conflict.
+ """
+ old_is_m2m = old_fi.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT
+ new_is_m2m = new_fi.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT
+
+ # A type change between MULTIOBJECT and a scalar type (or vice versa) is not
+ # a simple column rename/alter — the storage representation is fundamentally
+ # different (through-table vs column). Attempting alter_field in this case
+ # would fail at the DB level. Log and skip; the caller is expected to handle
+ # such changes as remove + add rather than alter.
+ if old_is_m2m != new_is_m2m:
+ logger.warning(
+ '_schema_alter_field: skipping unsupported type change %r→%r on %s '
+ '(MULTIOBJECT ↔ scalar changes require remove+add, not alter)',
+ old_fi.type, new_fi.type, model._meta.db_table,
+ )
+ return
+
+ old_mf = FIELD_TYPE_CLASS[old_fi.type]().get_model_field(old_fi, db_column=old_fi.effective_db_column)
+ new_mf = FIELD_TYPE_CLASS[new_fi.type]().get_model_field(new_fi, db_column=new_fi.effective_db_column)
+ old_mf.contribute_to_class(model, old_fi.name)
+ new_mf.contribute_to_class(model, new_fi.name)
+
+ with schema_conn.cursor() as cursor:
+ existing_cols = {
+ col.name
+ for col in schema_conn.introspection.get_table_description(cursor, model._meta.db_table)
+ }
+ if old_mf.column not in existing_cols:
+ if new_mf.column in existing_cols:
+ logger.debug(
+ '_schema_alter_field: %r already renamed to %r on %s, skipping',
+ old_mf.column, new_mf.column, model._meta.db_table,
+ )
+ return
+ if old_is_m2m:
+ # M2M fields have no physical column; the old through table is absent.
+ return
+ # Scalar field: neither the old nor the new column exists. The field was
+ # independently renamed in this schema (e.g. branch renamed A→X while main
+ # renamed A→Y; now applying main's rename to the branch). Look up the live
+ # field record in the target schema to find the actual column and rename it.
+ logger.warning(
+ '_schema_alter_field: rename conflict on %s — source column %r and '
+ 'target column %r are both absent; field pk=%d was independently renamed '
+ 'in this schema; resolving by looking up live column',
+ model._meta.db_table, old_mf.column, new_mf.column, new_fi.pk,
+ )
+ try:
+ live_fi = CustomObjectTypeField.objects.using(schema_conn.alias).get(pk=new_fi.pk)
+ except CustomObjectTypeField.DoesNotExist:
+ logger.debug(
+ '_schema_alter_field: field pk=%d not found in %s; skipping',
+ new_fi.pk, schema_conn.alias,
+ )
+ return
+ live_mf = FIELD_TYPE_CLASS[live_fi.type]().get_model_field(live_fi, db_column=live_fi.effective_db_column)
+ live_mf.contribute_to_class(model, live_fi.name)
+ if live_mf.column not in existing_cols:
+ logger.debug(
+ '_schema_alter_field: live column %r also absent on %s; skipping',
+ live_mf.column, model._meta.db_table,
+ )
+ return
+ schema_editor.alter_field(model, live_mf, new_mf)
+ return
+
+ if (
+ new_is_m2m
+ and old_fi.name != new_fi.name
+ ):
+ old_through = old_fi.through_table_name
+ new_through = new_fi.through_table_name
+
+ tables = existing_tables
+ if tables is None:
+ with schema_conn.cursor() as cursor:
+ tables = schema_conn.introspection.table_names(cursor)
+
+ if old_through in tables:
+ old_through_meta = type(
+ 'Meta', (),
+ {'db_table': old_through, 'app_label': APP_LABEL, 'managed': True},
+ )
+ old_through_model = generate_model(
+ f'_TempOldThrough_{old_through}',
+ (models.Model,),
+ {
+ '__module__': 'netbox_custom_objects.models',
+ 'Meta': old_through_meta,
+ 'id': models.AutoField(primary_key=True),
+ 'source': models.ForeignKey(
+ model, on_delete=models.CASCADE, db_column='source_id', related_name='+',
+ ),
+ 'target': models.ForeignKey(
+ model, on_delete=models.CASCADE, db_column='target_id', related_name='+',
+ ),
+ },
+ )
+ schema_editor.alter_db_table(old_through_model, old_through, new_through)
+ else:
+ # Old through table absent — create the new one from scratch
+ ft = FIELD_TYPE_CLASS[new_fi.type]()
+ ft.create_m2m_table(new_fi, model, new_fi.name, schema_conn=schema_conn)
+
+ schema_editor.alter_field(model, old_mf, new_mf)
+
class CustomObject(
BookmarksMixin,
@@ -99,6 +400,73 @@ class CustomObject(
class Meta:
abstract = True
+ @classmethod
+ def deserialize_object(cls, data, pk=None):
+ """
+ Hook called by ObjectChange.apply() for CREATE actions.
+
+ The squash merge strategy may apply a CO's CREATE before its
+ CustomObjectTypeField rows are in main (the dependency graph has no FK
+ from CO to fields). When that happens, the standard Django
+ deserialization would INSERT the CO with NULL custom-field values
+ because the columns don't exist yet.
+
+ This hook:
+ 1. Deserializes the CO using a fresh model (re-queried from DB).
+ 2. Does save_base(raw=True) as normal.
+ 3. Stores the full postchange_data in _deferred_co_field_data (ContextVar)
+ so that CustomObjectTypeField.save() can UPDATE the row after each
+ column is added (handles the squash ordering case).
+ """
+ from utilities.serialization import deserialize_object as _deserialize
+ from netbox_custom_objects.utilities import extract_cot_id_from_model_name
+
+ # Derive the COT primary key from the model class name (e.g. 'Table1Model' → 1)
+ cot_id_str = extract_cot_id_from_model_name(cls.__name__.lower())
+ if cot_id_str is None:
+ # Not a generated model name — fall back to standard deserialization.
+ return _deserialize(cls, data, pk=pk)
+ cot_id = int(cot_id_str) # regex guarantees digits-only
+
+ # Refresh the model cache so we pick up any fields already applied to main.
+ # (In the squash case the cache may still point to a zero-field model.)
+ from netbox_custom_objects.models import CustomObjectType as _COT # noqa: F401
+ _COT.clear_model_cache(cot_id)
+ try:
+ cot = _COT.objects.get(pk=cot_id)
+ fresh_model = cot.get_model()
+ except _COT.DoesNotExist:
+ fresh_model = cls
+
+ inner = _deserialize(fresh_model, data, pk=pk)
+ obj = inner.object
+ table_name = fresh_model._meta.db_table
+ full_data = dict(data)
+
+ class _Deserialized:
+ object = obj
+
+ def save(self, using=None, **_kwargs):
+ from django.db import DEFAULT_DB_ALIAS
+ _using = using or DEFAULT_DB_ALIAS
+ models.Model.save_base(obj, using=_using, raw=True)
+ # Read pk after save_base so that auto-assigned PKs are captured.
+ # (If pk was None before save_base, obj.pk is now the DB-assigned id.)
+ obj_pk = obj.pk
+ # Register full data for deferred column updates (squash ordering fix).
+ deferred = _deferred_co_field_data.get()
+ if deferred is None:
+ deferred = {}
+ _deferred_co_field_data.set(deferred)
+ if table_name not in deferred:
+ deferred[table_name] = {}
+ deferred[table_name][obj_pk] = {
+ 'using': _using,
+ 'data': full_data,
+ }
+
+ return _Deserialized()
+
def __str__(self):
# Find the field with primary=True and return that field's "name" as the name of the object
primary_field = self._field_objects.get(self._primary_field_id, None)
@@ -238,6 +606,9 @@ def __str__(self):
return self.display_name
def clean(self):
+ # Guard against None (can arrive via update_object during branch revert)
+ if self.custom_field_data is None:
+ self.custom_field_data = {}
super().clean()
if not self.slug:
@@ -337,6 +708,13 @@ def get_cached_through_models(cls, custom_object_type_id):
"""
return cls._through_model_cache.get(custom_object_type_id, {})
+ def serialize_object(self, exclude=None):
+ # cache_timestamp is an internal cache-invalidation field; exclude it
+ # from ObjectChange records so it doesn't appear as a tracked change.
+ extra = ['cache_timestamp']
+ combined = list(exclude or []) + extra
+ return super().serialize_object(exclude=combined)
+
def get_absolute_url(self):
return reverse("plugins:netbox_custom_objects:customobjecttype", args=[self.pk])
@@ -379,7 +757,7 @@ def _fetch_and_generate_field_attrs(
field_name = field.name
field_attrs[field.name] = field_type.get_model_field(
- field,
+ field, db_column=field.effective_db_column,
)
# Add to field objects only if the field was successfully generated
@@ -654,7 +1032,7 @@ def _ensure_field_fk_constraint(self, model, field_name):
related_table = related_model._meta.db_table
column_name = model_field.column
- with connection.cursor() as cursor:
+ with _get_schema_connection().cursor() as cursor:
# Drop existing FK constraint if it exists
# Query for existing constraints
cursor.execute("""
@@ -669,7 +1047,9 @@ def _ensure_field_fk_constraint(self, model, field_name):
constraint_name = row[0]
cursor.execute(f'ALTER TABLE "{table_name}" DROP CONSTRAINT IF EXISTS "{constraint_name}"')
- # Create new FK constraint with ON DELETE CASCADE
+ # Create new FK constraint with ON DELETE CASCADE.
+ # Not DEFERRABLE: a deferred constraint leaves pending trigger events that block
+ # subsequent ALTER TABLE calls (e.g. during branch revert remove_field).
constraint_name = f"{table_name}_{column_name}_fk_cascade"
cursor.execute(f"""
ALTER TABLE "{table_name}"
@@ -677,7 +1057,6 @@ def _ensure_field_fk_constraint(self, model, field_name):
FOREIGN KEY ("{column_name}")
REFERENCES "{related_table}" ("id")
ON DELETE CASCADE
- DEFERRABLE INITIALLY DEFERRED
""")
def _ensure_all_fk_constraints(self, model):
@@ -700,18 +1079,61 @@ def create_model(self):
# Ensure the ContentType exists and is immediately available
features = get_model_features(model)
- if 'branching' in features:
- features.remove('branching')
self.object_type.features = features
self.object_type.public = True
self.object_type.save()
- with connection.schema_editor() as schema_editor:
+ with _get_schema_connection().schema_editor() as schema_editor:
schema_editor.create_model(model)
get_serializer_class(model)
self.register_custom_object_search_index(model)
+ @classmethod
+ def deserialize_object(cls, data, pk=None):
+ """
+ Custom deserialization hook for netbox-branching's merge/revert engine.
+
+    ``ObjectChange.apply()`` normally uses ``DeserializedObject.save()``, which
+    calls ``Model.save_base(raw=True)`` — bypassing our ``save()`` override
+    (and ``raw=True`` makes signal handlers skip their work). So ``create_model()``
+    never runs and the physical table is never created in the destination schema.
+
+ By implementing this classmethod the apply engine calls our version instead,
+ returning a wrapper whose ``save()`` invokes the full ``CustomObjectType.save()``
+ lifecycle (signals included) so that the table is created as a side effect of
+ replaying the ObjectChange.
+
+ ``object_type`` is cleared before saving so the ``custom_object_type_post_save_handler``
+ can re-create and link it correctly in the destination schema, avoiding any FK
+ mismatch between the branch and main ``ObjectType`` pks.
+ """
+ from utilities.serialization import deserialize_object as _deserialize
+
+ inner = _deserialize(cls, data, pk=pk)
+
+ class _SchemaAwareDeserialized:
+ def __init__(self, deserialized):
+ self._inner = deserialized
+ self.object = deserialized.object
+
+ def save(self, using=None, **kwargs):
+ # Snapshot before modifying so that diff()['pre'] records the
+ # current state rather than showing all fields as None on revert.
+ self.object.snapshot()
+ # Clear the ObjectType FK — it may not exist in main yet.
+ # custom_object_type_post_save_handler re-sets it after INSERT.
+ self.object.object_type = None
+ self.object.object_type_id = None
+ self.object.save()
+ # Re-apply any M2M data (tags, etc.) that was stripped during deserialization.
+ if self._inner.m2m_data:
+ for accessor_name, object_list in self._inner.m2m_data.items():
+ getattr(self.object, accessor_name).set(object_list)
+ self._inner.m2m_data = None
+
+ return _SchemaAwareDeserialized(inner)
+
def save(self, *args, **kwargs):
needs_db_create = self._state.adding
@@ -728,20 +1150,41 @@ def delete(self, *args, **kwargs):
self.clear_model_cache(self.id)
model = self.get_model()
+ schema_conn = _get_schema_connection()
+ in_branch = schema_conn is not connection
# Delete all CustomObjectTypeFields that reference this CustomObjectType
for field in CustomObjectTypeField.objects.filter(related_object_type=self.object_type):
field.delete()
object_type = ObjectType.objects.get_for_model(model)
- ObjectChange.objects.filter(changed_object_type=object_type).delete()
+
+ # ObjectChange and ObjectType records live in the main schema. Only clean
+ # them up when operating outside a branch; inside a branch they belong to
+ # main and must not be touched until the deletion is merged.
+ if not in_branch:
+ ObjectChange.objects.filter(changed_object_type=object_type).delete()
+
super().delete(*args, **kwargs)
- # Temporarily disconnect the pre_delete handler to skip the ObjectType deletion
- # TODO: Remove this disconnect/reconnect after ObjectType has been exempted from handle_deleted_object
- pre_delete.disconnect(handle_deleted_object)
- object_type.delete()
- with connection.schema_editor() as schema_editor:
+ if not in_branch:
+ # ChangeDiff has a PROTECT FK to ContentType/ObjectType — delete those
+ # records first so object_type.delete() is not blocked.
+ try:
+ from netbox_branching.models import ChangeDiff
+ ChangeDiff.objects.filter(object_type=object_type).delete()
+ except ImportError:
+ pass
+ # Temporarily disconnect the pre_delete handler to skip the ObjectType deletion
+ # TODO: Remove this disconnect/reconnect after ObjectType has been exempted from handle_deleted_object
+ pre_delete.disconnect(handle_deleted_object)
+ object_type.delete()
+ pre_delete.connect(handle_deleted_object)
+
+ with schema_conn.schema_editor() as schema_editor:
+ # Drop through tables before the main table (they have FKs pointing to it).
+ for through_model in getattr(model, '_through_models', []):
+ schema_editor.delete_model(through_model)
schema_editor.delete_model(model)
# Unregister the model and its through-models from Django's app registry so
@@ -767,9 +1210,6 @@ def delete(self, *args, **kwargs):
# Re-clear the model cache to remove re-cached model from get_model.
self.clear_model_cache(self.id)
- # Reconnect the pre_delete handler after all cleanup is done.
- pre_delete.connect(handle_deleted_object)
-
@receiver(post_save, sender=CustomObjectType)
def custom_object_type_post_save_handler(sender, instance, created, **kwargs):
@@ -780,10 +1220,66 @@ def custom_object_type_post_save_handler(sender, instance, created, **kwargs):
app_label=APP_LABEL,
model=content_type_name
)
+ # Snapshot before modifying so change logging records a correct pre-state.
+ # Without this, diff()['pre'] would set all fields to None during branch revert.
+ instance.snapshot()
instance.object_type = ct
instance.save()
+def _rename_objectchange_field_key(fi, old_name, new_name):
+ """
+ Rename a JSON key in all ObjectChange records for CustomObject instances of
+ this field's type, reflecting a field rename from *old_name* to *new_name*.
+
+ Updates both ``prechange_data`` and ``postchange_data`` in the ObjectChange
+ table, and ``original``/``modified``/``current`` in netbox-branching's
+ ChangeDiff table when that plugin is installed.
+
+    Only the hardcoded JSON column names are interpolated via ``str.format``; the
+    field names are passed as bound query parameters, so SQL injection is not possible.
+
+ This runs inside the same ``transaction.atomic()`` block as
+ ``CustomObjectTypeField.save()``, so it rolls back cleanly if the enclosing
+ transaction is aborted.
+ """
+ from django.db import connections
+
+ cot = fi.custom_object_type
+ model = cot.get_model()
+ ct = ContentType.objects.get_for_model(model)
+ conn = _get_schema_connection()
+
+ oc_sql = (
+ 'UPDATE core_objectchange '
+ 'SET {col} = ({col} - %s) || jsonb_build_object(%s, {col}->%s) '
+ 'WHERE changed_object_type_id = %s AND {col} ? %s'
+ )
+ with connections[conn.alias].cursor() as cursor:
+ for json_col in ('prechange_data', 'postchange_data'):
+ cursor.execute(oc_sql.format(col=json_col), [old_name, new_name, old_name, ct.id, old_name])
+
+ logger.debug('_rename_objectchange_field_key: %r → %r for %s', old_name, new_name, ct)
+
+ try:
+ from netbox_branching.models import ChangeDiff # noqa: F401 — presence check only
+ cd_sql = (
+ 'UPDATE netbox_branching_changediff '
+ 'SET {col} = ({col} - %s) || jsonb_build_object(%s, {col}->%s) '
+ 'WHERE object_type_id = %s AND {col} IS NOT NULL AND {col} ? %s'
+ )
+ with connections[conn.alias].cursor() as cursor:
+ for json_col in ('original', 'modified', 'current'):
+ cursor.execute(cd_sql.format(col=json_col), [old_name, new_name, old_name, ct.id, old_name])
+ except ImportError:
+ pass # netbox-branching not installed
+ except Exception:
+ logger.debug(
+ '_rename_objectchange_field_key: ChangeDiff rename failed for %r → %r',
+ old_name, new_name, exc_info=True,
+ )
+
+
class CustomObjectTypeField(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel):
custom_object_type = models.ForeignKey(
CustomObjectType, on_delete=models.CASCADE, related_name="fields"
@@ -835,6 +1331,15 @@ class CustomObjectTypeField(CloningMixin, ExportTemplatesMixin, ChangeLoggedMode
),
),
)
+ db_column = models.CharField(
+ verbose_name=_("database column"),
+ max_length=50,
+ blank=True,
+ help_text=_(
+ "Physical database column name. Set once at creation and never changed, "
+ "so renames are pure metadata changes that do not require DDL."
+ ),
+ )
label = models.CharField(
verbose_name=_("label"),
max_length=50,
@@ -1028,6 +1533,16 @@ def is_single_value(self):
def many(self):
return self.type in ["multiobject"]
+ @property
+ def effective_db_column(self):
+ """
+ Return the physical database column name for this field.
+
+ ``db_column`` is frozen at creation time so that renames are pure
+ metadata operations — the physical column name never changes.
+ """
+ return self.db_column
+
def get_child_relations(self, instance):
return instance.get_field_value(self)
@@ -1142,19 +1657,21 @@ def clean(self):
{"unique": _("Uniqueness cannot be enforced for boolean or multiobject fields")}
)
- # Check if uniqueness constraint can be applied when changing from non-unique to unique
+ # Check if uniqueness constraint can be applied when changing from non-unique to unique.
+ # Skip when _original is absent (e.g. during deserialization in branch merge/revert).
if (
self.pk
and self.unique
- and not self.original.unique
and not self._state.adding
+ and hasattr(self, '_original')
+ and not self.original.unique
):
field_type = FIELD_TYPE_CLASS[self.type]()
- model_field = field_type.get_model_field(self)
+ model_field = field_type.get_model_field(self, db_column=self.effective_db_column)
model = self.custom_object_type.get_model()
model_field.contribute_to_class(model, self.name)
- old_field = field_type.get_model_field(self.original)
+ old_field = field_type.get_model_field(self.original, db_column=self.original.effective_db_column)
old_field.contribute_to_class(model, self._original_name)
try:
@@ -1625,116 +2142,62 @@ def through_table_name(self):
def through_model_name(self):
return f"Through_{self.through_table_name}"
+ @classmethod
+ def deserialize_object(cls, data, pk=None):
+ """
+ Custom deserialization hook for netbox-branching's merge/revert engine.
+
+ Same problem as ``CustomObjectType.deserialize_object``: the default
+ ``DeserializedObject.save(raw=True)`` bypasses ``CustomObjectTypeField.save()``,
+ so the physical column is never added to the custom object table.
+
+ This wrapper calls the real ``save()`` so that ``add_field`` runs as a side
+ effect of replaying the CREATE ObjectChange during a merge.
+ """
+ from utilities.serialization import deserialize_object as _deserialize
+
+ inner = _deserialize(cls, data, pk=pk)
+
+ class _SchemaAwareDeserialized:
+ def __init__(self, deserialized):
+ self._inner = deserialized
+ self.object = deserialized.object
+
+ def save(self, using=None, **kwargs):
+ self.object.save()
+ if self._inner.m2m_data:
+ for accessor_name, object_list in self._inner.m2m_data.items():
+ getattr(self.object, accessor_name).set(object_list)
+ self._inner.m2m_data = None
+
+ return _SchemaAwareDeserialized(inner)
+
def save(self, *args, **kwargs):
is_new = self._state.adding
- field_type = FIELD_TYPE_CLASS[self.type]()
- model_field = field_type.get_model_field(self)
- model = self.custom_object_type.get_model()
- model_field.contribute_to_class(model, self.name)
- with connection.schema_editor() as schema_editor:
- if self._state.adding:
- schema_editor.add_field(model, model_field)
- if self.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT:
- field_type.create_m2m_table(self, model, self.name)
- else:
- old_field = field_type.get_model_field(self.original)
- old_field.contribute_to_class(model, self._original_name)
+ # Freeze the physical column name at creation. db_column is set once
+ # here and never updated, so subsequent renames only update the ORM
+ # field name — no DDL is required for renames.
+ if self._state.adding and not self.db_column:
+ self.db_column = self.name
- # Special handling for MultiObject fields when the name changes
- if (
- self.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT
- and self.name != self._original_name
- ):
- # For renamed MultiObject fields, we just need to rename the through table
- old_through_table_name = self.original.through_table_name
- new_through_table_name = self.through_table_name
-
- # Check if old through table exists
- with connection.cursor() as cursor:
- tables = connection.introspection.table_names(cursor)
- old_table_exists = old_through_table_name in tables
-
- if old_table_exists:
- # Create temporary models to represent the old and new through table states
- old_through_meta = type(
- "Meta",
- (),
- {
- "db_table": old_through_table_name,
- "app_label": APP_LABEL,
- "managed": True,
- },
- )
- old_through_model = generate_model(
- f"TempOld{self.original.through_model_name}",
- (models.Model,),
- {
- "__module__": "netbox_custom_objects.models",
- "Meta": old_through_meta,
- "id": models.AutoField(primary_key=True),
- "source": models.ForeignKey(
- model,
- on_delete=models.CASCADE,
- db_column="source_id",
- related_name="+",
- ),
- "target": models.ForeignKey(
- model,
- on_delete=models.CASCADE,
- db_column="target_id",
- related_name="+",
- ),
- },
- )
+ # Use the branch connection when operating inside a branch so that schema
+ # editor operations target the branch schema rather than main.
+ schema_conn = _get_schema_connection()
- new_through_meta = type(
- "Meta",
- (),
- {
- "db_table": new_through_table_name,
- "app_label": APP_LABEL,
- "managed": True,
- },
- )
- new_through_model = generate_model(
- f"TempNew{self.through_model_name}",
- (models.Model,),
- {
- "__module__": "netbox_custom_objects.models",
- "Meta": new_through_meta,
- "id": models.AutoField(primary_key=True),
- "source": models.ForeignKey(
- model,
- on_delete=models.CASCADE,
- db_column="source_id",
- related_name="+",
- ),
- "target": models.ForeignKey(
- model,
- on_delete=models.CASCADE,
- db_column="target_id",
- related_name="+",
- ),
- },
- )
- new_through_model # To silence ruff error
+ model = self.custom_object_type.get_model()
- # Rename the table using Django's schema editor
- schema_editor.alter_db_table(
- old_through_model,
- old_through_table_name,
- new_through_table_name,
- )
- else:
- # No old table exists, create the new through table
- field_type.create_m2m_table(self, model, self.name)
+ with schema_conn.schema_editor() as schema_editor:
+ if self._state.adding:
+ _schema_add_field(self, model, schema_editor, schema_conn)
+ _apply_deferred_co_field(self)
+ else:
+ _schema_alter_field(self.original, self, model, schema_editor, schema_conn)
- # Alter the field normally (this updates the field definition)
- schema_editor.alter_field(model, old_field, model_field)
- else:
- # Normal field alteration
- schema_editor.alter_field(model, old_field, model_field)
+ # When the field is renamed, update ObjectChange / ChangeDiff JSON keys so
+ # historical audit records and branch diffs stay consistent with the new name.
+ if not self._state.adding and self._original_name != self.name:
+ _rename_objectchange_field_key(self, self._original_name, self.name)
# Ensure FK constraints are properly created for OBJECT fields with CASCADE behavior
should_ensure_fk = False
@@ -1756,7 +2219,10 @@ def save(self, *args, **kwargs):
# Clear and refresh the model cache for this CustomObjectType when a field is modified
self.custom_object_type.clear_model_cache(self.custom_object_type.id)
- # Update parent's cache_timestamp to invalidate cache across all workers
+ # Update parent's cache_timestamp to invalidate cache across all workers.
+ # snapshot() must be called first so that change logging has a correct pre-state;
+ # without it, diff()['pre'] would set ALL fields to None during branch revert.
+ self.custom_object_type.snapshot()
self.custom_object_type.save(update_fields=['cache_timestamp'])
super().save(*args, **kwargs)
@@ -1768,8 +2234,18 @@ def ensure_constraint():
transaction.on_commit(ensure_constraint)
+ # Regenerate the model from DB now that the field change is persisted.
+ # _schema_alter_field adds both old and new field names to the model
+ # class via contribute_to_class, and something between clear_model_cache
+ # and super().save() (e.g. a post_save signal on the COT) can call
+ # get_model() while the DB still has the old name, caching a stale model
+ # without the new name. Forcing a no_cache regeneration here (after
+ # super().save() committed the new name) ensures apps.all_models holds a
+ # clean model with exactly the current DB field list.
+ updated_model = self.custom_object_type.get_model(no_cache=True)
+
# Reregister SearchIndex with new set of searchable fields
- self.custom_object_type.register_custom_object_search_index(model)
+ self.custom_object_type.register_custom_object_search_index(updated_model)
# Reindex all objects of this type if search indexing was affected
if is_new:
@@ -1781,28 +2257,33 @@ def ensure_constraint():
transaction.on_commit(lambda: ReindexCustomObjectTypeJob.enqueue(cot_id=_cot_id))
def delete(self, *args, **kwargs):
- field_type = FIELD_TYPE_CLASS[self.type]()
- model_field = field_type.get_model_field(self)
+ # Use the branch connection when operating inside a branch.
+ schema_conn = _get_schema_connection()
+
model = self.custom_object_type.get_model()
- model_field.contribute_to_class(model, self.name)
- with connection.schema_editor() as schema_editor:
- if self.type == CustomFieldTypeChoices.TYPE_MULTIOBJECT:
- apps = model._meta.apps
- through_model = apps.get_model(APP_LABEL, self.through_model_name)
- schema_editor.delete_model(through_model)
- schema_editor.remove_field(model, model_field)
+ with schema_conn.schema_editor() as schema_editor:
+ _schema_remove_field(self, model, schema_editor)
# Clear the model cache for this CustomObjectType when a field is deleted
self.custom_object_type.clear_model_cache(self.custom_object_type.id)
- # Update parent's cache_timestamp to invalidate cache across all workers
+ # Update parent's cache_timestamp to invalidate cache across all workers.
+ # snapshot() must be called first so that change logging has a correct pre-state.
+ self.custom_object_type.snapshot()
self.custom_object_type.save(update_fields=['cache_timestamp'])
super().delete(*args, **kwargs)
+ # Regenerate and re-register the model so the app registry no longer includes
+ # the removed field. During squash revert the squash strategy may try to query
+ # CO rows (model.objects.get(pk=...)) after undoing this field but before undoing
+ # the CO itself. If the stale model class is still in the app registry it will
+ # include the now-absent column in its SELECT, causing ProgrammingError.
+ updated_model = self.custom_object_type.get_model()
+
# Reregister SearchIndex with new set of searchable fields
- self.custom_object_type.register_custom_object_search_index(model)
+ self.custom_object_type.register_custom_object_search_index(updated_model)
# Reindex all objects of this type since a searchable field was removed
if self.search_weight > 0:
diff --git a/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_edit.html b/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_edit.html
index 2ed1a515..7371ebec 100644
--- a/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_edit.html
+++ b/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_edit.html
@@ -9,10 +9,6 @@
{# Edit form #}
- {% if branch_warning %}
- {% include 'netbox_custom_objects/inc/branch_warning.html' %}
- {% endif %}
-
diff --git a/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_import.html b/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_import.html
index 3ae447b5..8bce96e7 100644
--- a/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_import.html
+++ b/netbox_custom_objects/templates/netbox_custom_objects/custom_object_bulk_import.html
@@ -10,10 +10,6 @@
- {% if branch_warning %}
- {% include 'netbox_custom_objects/inc/branch_warning.html' %}
- {% endif %}
-