diff --git a/.github/workflows/containerbuild.yml b/.github/workflows/containerbuild.yml
index 7b367f0eb0..0056d99cb4 100644
--- a/.github/workflows/containerbuild.yml
+++ b/.github/workflows/containerbuild.yml
@@ -79,7 +79,7 @@ jobs:
with:
skip_after_successful_duplicate: false
github_token: ${{ github.token }}
- paths: '["k8s/images/nginx/*", ".github/workflows/containerbuild.yml"]'
+ paths: '["docker/Dockerfile.nginx.prod", "docker/nginx/*", ".github/workflows/containerbuild.yml"]'
build_nginx:
name: nginx - test build of nginx Docker image
@@ -100,6 +100,6 @@ jobs:
uses: docker/build-push-action@v6
with:
context: ./
- file: ./k8s/images/nginx/Dockerfile
+ file: ./docker/Dockerfile.nginx.prod
platforms: linux/amd64
push: false
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index fc49a89d8a..0000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,6 +0,0 @@
-[submodule "kolibri"]
- path = kolibri
- url = https://github.com/learningequality/kolibri.git
-[submodule "contentcuration/kolibri"]
- path = contentcuration/kolibri
- url = https://github.com/learningequality/kolibri.git
diff --git a/Makefile b/Makefile
index 002d337323..dc1e70b51e 100644
--- a/Makefile
+++ b/Makefile
@@ -171,11 +171,7 @@ dcbuild:
$(DOCKER_COMPOSE) build
dcup: .docker/minio .docker/postgres
- # run all services except for cloudprober
- $(DOCKER_COMPOSE) up studio-app celery-worker
-
-dcup-cloudprober: .docker/minio .docker/postgres
- # run all services including cloudprober
+ # run all services
$(DOCKER_COMPOSE) up
dcdown:
diff --git a/cloudbuild-pr.yaml b/cloudbuild-pr.yaml
deleted file mode 100644
index 2fb21ce2c5..0000000000
--- a/cloudbuild-pr.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-steps:
-- name: 'gcr.io/cloud-builders/docker'
- id: pull-app-image-cache
- args: ['pull', 'gcr.io/$PROJECT_ID/learningequality-studio-app:latest']
-
-- name: 'gcr.io/cloud-builders/docker'
- id: build-app-image
- waitFor: ['pull-app-image-cache'] # don't wait for previous steps
- args: [
- 'build',
- '-f', 'docker/Dockerfile.demo',
- '--cache-from', 'gcr.io/$PROJECT_ID/learningequality-studio-app:latest',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-app:latest',
- '.'
- ]
-
-- name: 'gcr.io/cloud-builders/docker'
- id: build-nginx-image
- waitFor: ['-'] # don't wait for previous steps
- args: [
- 'build',
- '-f', 'k8s/images/nginx/Dockerfile',
- '--cache-from', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:latest',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:latest',
- '.'
- ]
-
-- name: 'gcr.io/cloud-builders/docker'
- id: push-app-image
- waitFor: ['build-app-image']
- args: ['push', 'gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA']
-
-- name: 'gcr.io/cloud-builders/docker'
- id: push-nginx-image
- waitFor: ['build-nginx-image']
- args: ['push', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA']
-
-- name: 'gcr.io/cloud-builders/gcloud'
- id: decrypt-gcs-service-account
- waitFor: ['-']
- args: [
- 'kms', 'decrypt',
- '--location=global', '--keyring=builder-secrets', '--key=secret-encrypter',
- '--ciphertext-file=k8s/build-secrets/$PROJECT_ID-gcs-service-account.json.enc',
- '--plaintext-file=gcs-service-account.json'
- ]
-
-- name: 'gcr.io/cloud-builders/gcloud'
- id: create-new-database
- waitFor: ['-']
- dir: "k8s"
- entrypoint: 'bash'
- args: [
- '-c',
- './create-cloudsql-database.sh $_RELEASE_NAME $_DATABASE_INSTANCE_NAME'
- ]
-
-- name: 'gcr.io/$PROJECT_ID/helm'
- id: helm-deploy-studio-instance
- waitFor: ['decrypt-gcs-service-account', 'push-app-image', 'push-nginx-image']
- dir: "k8s"
- env:
- - 'CLOUDSDK_COMPUTE_ZONE=us-central1-f'
- - 'CLOUDSDK_CONTAINER_CLUSTER=dev-qa-cluster'
- secretEnv: ['POSTMARK_API_KEY']
- entrypoint: 'bash'
- args:
- - -c
- - >
- /builder/helm.bash &&
- ./helm-deploy.sh
- $_RELEASE_NAME
- $_STORAGE_BUCKET
- $COMMIT_SHA
- $$POSTMARK_API_KEY
- ""
- ""
- $_POSTGRES_USERNAME
- $_RELEASE_NAME
- $_POSTGRES_PASSWORD
- $PROJECT_ID-$_DATABASE_INSTANCE_NAME-sql-proxy-gcloud-sqlproxy.sqlproxy
- ../gcs-service-account.json
- $PROJECT_ID
-
-- name: 'gcr.io/cloud-builders/gsutil'
- id: remove-tarball-in-gcs
- waitFor: ['helm-deploy-studio-instance']
- args: ['rm', $_TARBALL_LOCATION]
-
-timeout: 3600s
-secrets:
-- kmsKeyName: projects/ops-central/locations/global/keyRings/builder-secrets/cryptoKeys/secret-encrypter
- secretEnv:
- POSTMARK_API_KEY: CiQA7z1GH3QhvCEWNn6KS64t/c8BEQng5I4CdMC6VGNxJkWmZrwSTgB+R8mv/PSrzlDmCYSOZc4bugWA+K+lJ8nIll1BBsZZEV5M9GuOCYVn6sVWg9pCIVujwyb4EvEy1QaKmZCzAnTw9aHEXDH0sruAUHBaTA==
-
-images:
- - 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA'
- - 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:latest'
- - 'gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA'
- - 'gcr.io/$PROJECT_ID/learningequality-studio-app:latest'
diff --git a/cloudbuild-production.yaml b/cloudbuild-production.yaml
deleted file mode 100644
index 3ff333a67f..0000000000
--- a/cloudbuild-production.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-steps:
-- name: 'gcr.io/cloud-builders/docker'
- id: pull-app-image-cache
- args: ['pull', 'gcr.io/$PROJECT_ID/learningequality-studio-app:latest']
-
-- name: 'gcr.io/cloud-builders/docker'
- id: build-app-image
- entrypoint: bash
- waitFor: ['pull-app-image-cache'] # wait for app image cache pull to finish
- args:
- - -c
- - >
- docker build
- --build_arg COMMIT_SHA=$COMMIT_SHA
- -f k8s/images/app/Dockerfile
- --cache-from gcr.io/$PROJECT_ID/learningequality-studio-app:latest
- -t gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA
- -t gcr.io/$PROJECT_ID/learningequality-studio-app:latest
- .
-
-- name: 'gcr.io/cloud-builders/docker'
- id: build-nginx-image
- waitFor: ['-'] # don't wait for previous steps
- args: [
- 'build',
- '-f', 'k8s/images/nginx/Dockerfile',
- '--cache-from', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:latest',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:latest',
- '.'
- ]
-
-- name: 'gcr.io/cloud-builders/docker'
- id: pull-prober-image-cache
- waitFor: ['-']
- args: ['pull', 'gcr.io/$PROJECT_ID/learningequality-studio-prober:latest']
-
-- name: 'gcr.io/cloud-builders/docker'
- id: build-prober-image
- waitFor: ['pull-prober-image-cache'] # don't wait for previous steps
- args: [
- 'build',
- '-f', 'k8s/images/prober/Dockerfile',
- '--cache-from', 'gcr.io/$PROJECT_ID/learningequality-studio-prober:latest',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-prober:$COMMIT_SHA',
- '-t', 'gcr.io/$PROJECT_ID/learningequality-studio-prober:latest',
- '.'
- ]
-
-- name: 'gcr.io/cloud-builders/docker'
- id: push-app-image
- waitFor: ['build-app-image']
- args: ['push', 'gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA']
-
-- name: 'gcr.io/cloud-builders/docker'
- id: push-nginx-image
- waitFor: ['build-nginx-image']
- args: ['push', 'gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA']
-
-- name: 'gcr.io/cloud-builders/docker'
- id: push-prober-image
- waitFor: ['build-prober-image']
- args: ['push', 'gcr.io/$PROJECT_ID/learningequality-studio-prober:$COMMIT_SHA']
-
-- name: 'gcr.io/$PROJECT_ID/helm'
- id: helm-deploy-studio-instance
- waitFor: ['push-app-image', 'push-nginx-image']
- dir: "k8s"
- env:
- - 'CLOUDSDK_COMPUTE_ZONE=us-central1-f'
- - 'CLOUDSDK_CONTAINER_CLUSTER=contentworkshop-central'
- entrypoint: 'bash'
- args:
- - -c
- - >
- /builder/helm.bash &&
- ./helm-deploy.sh
- $BRANCH_NAME
- gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA
- gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA
- $_STORAGE_BUCKET
- $COMMIT_SHA
- $PROJECT_ID
- $_DATABASE_INSTANCE_NAME
- us-central1
-
-
-substitutions:
- _DATABASE_INSTANCE_NAME: develop # by default, connect to the develop DB
- _STORAGE_BUCKET: develop-studio-content
-
-timeout: 3600s
-images:
- - gcr.io/$PROJECT_ID/learningequality-studio-nginx:latest
- - gcr.io/$PROJECT_ID/learningequality-studio-nginx:$COMMIT_SHA
- - gcr.io/$PROJECT_ID/learningequality-studio-app:latest
- - gcr.io/$PROJECT_ID/learningequality-studio-app:$COMMIT_SHA
- - 'gcr.io/$PROJECT_ID/learningequality-studio-prober:$COMMIT_SHA'
- - 'gcr.io/$PROJECT_ID/learningequality-studio-prober:latest'
diff --git a/contentcuration/contentcuration/constants/completion_criteria.py b/contentcuration/contentcuration/constants/completion_criteria.py
index 1a8c101e38..49d351bd7d 100644
--- a/contentcuration/contentcuration/constants/completion_criteria.py
+++ b/contentcuration/contentcuration/constants/completion_criteria.py
@@ -3,7 +3,9 @@
from jsonschema.validators import validator_for
from le_utils.constants import completion_criteria
from le_utils.constants import content_kinds
+from le_utils.constants import exercises
from le_utils.constants import mastery_criteria
+from le_utils.constants import modalities
def _build_validator():
@@ -52,10 +54,11 @@ def _build_validator():
completion_criteria.APPROX_TIME,
completion_criteria.REFERENCE,
},
+ content_kinds.TOPIC: {completion_criteria.MASTERY},
}
-def check_model_for_kind(data, kind):
+def check_model_for_kind(data, kind, modality=None):
model = data.get("model")
if kind is None or model is None or kind not in ALLOWED_MODELS_PER_KIND:
return
@@ -68,11 +71,37 @@ def check_model_for_kind(data, kind):
)
)
+ if kind == content_kinds.TOPIC:
+ check_topic_completion_criteria(data, modality)
-def validate(data, kind=None):
+
+def check_topic_completion_criteria(data, modality):
+ """
+ Validates topic-specific completion criteria rules:
+ - Topics can only have completion criteria if modality is UNIT
+ - Topics can only use PRE_POST_TEST mastery model
+ """
+ # Topics can only have completion criteria with UNIT modality
+ if modality != modalities.UNIT:
+ raise ValidationError(
+ "Topics can only have completion criteria with UNIT modality"
+ )
+
+ # Topics can only use PRE_POST_TEST mastery model
+ threshold = data.get("threshold", {})
+ mastery_model = threshold.get("mastery_model")
+ if mastery_model is not None and mastery_model != exercises.PRE_POST_TEST:
+ raise ValidationError(
+ "mastery_model '{}' is invalid for topic content kind; "
+ "only '{}' is allowed".format(mastery_model, exercises.PRE_POST_TEST)
+ )
+
+
+def validate(data, kind=None, modality=None):
"""
:param data: Dictionary of data to validate
:param kind: A str of the node content kind
+ :param modality: A str of the node modality (required for topics with completion criteria)
:raises: ValidationError: When invalid
"""
# empty dicts are okay
@@ -104,4 +133,4 @@ def validate(data, kind=None):
e.error_list.extend(error_descriptions)
raise e
- check_model_for_kind(data, kind)
+ check_model_for_kind(data, kind, modality)
diff --git a/contentcuration/contentcuration/frontend/accounts/pages/Create.vue b/contentcuration/contentcuration/frontend/accounts/pages/Create.vue
index 91c4431ea2..65afaa16a2 100644
--- a/contentcuration/contentcuration/frontend/accounts/pages/Create.vue
+++ b/contentcuration/contentcuration/frontend/accounts/pages/Create.vue
@@ -213,9 +213,10 @@
@@ -260,6 +261,7 @@
return {
valid: true,
registrationFailed: false,
+ submitting: false,
form: {
first_name: '',
last_name: '',
@@ -482,6 +484,12 @@
// We need to check the "acceptedAgreement" here explicitly because it is not a
// Vuetify form field and does not trigger the form validation.
if (this.$refs.form.validate() && this.acceptedAgreement) {
+ // Prevent double submission
+ if (this.submitting) {
+ return Promise.resolve();
+ }
+
+ this.submitting = true;
const cleanedData = this.clean(this.form);
return this.register(cleanedData)
.then(() => {
@@ -517,6 +525,9 @@
this.registrationFailed = true;
this.valid = false;
}
+ })
+ .finally(() => {
+ this.submitting = false;
});
} else if (this.$refs.top.scrollIntoView) {
this.$refs.top.scrollIntoView({ behavior: 'smooth' });
diff --git a/contentcuration/contentcuration/frontend/accounts/pages/__tests__/create.spec.js b/contentcuration/contentcuration/frontend/accounts/pages/__tests__/create.spec.js
index a2c2f40d71..11067e892e 100644
--- a/contentcuration/contentcuration/frontend/accounts/pages/__tests__/create.spec.js
+++ b/contentcuration/contentcuration/frontend/accounts/pages/__tests__/create.spec.js
@@ -179,4 +179,19 @@ describe('create', () => {
expect(wrapper.vm.registrationFailed).toBe(true);
});
});
+ describe('double-submit prevention', () => {
+ it('should prevent multiple API calls on rapid clicks', async () => {
+ const [wrapper, mocks] = await makeWrapper();
+
+ // Click submit multiple times
+ const p1 = wrapper.vm.submit();
+ const p2 = wrapper.vm.submit();
+ const p3 = wrapper.vm.submit();
+
+ await Promise.all([p1, p2, p3]);
+
+ // Only 1 API call should be made
+ expect(mocks.register).toHaveBeenCalledTimes(1);
+ });
+ });
});
diff --git a/contentcuration/contentcuration/frontend/channelEdit/pages/StagingTreePage/index.vue b/contentcuration/contentcuration/frontend/channelEdit/pages/StagingTreePage/index.vue
index 6939d232fc..4991a89e25 100644
--- a/contentcuration/contentcuration/frontend/channelEdit/pages/StagingTreePage/index.vue
+++ b/contentcuration/contentcuration/frontend/channelEdit/pages/StagingTreePage/index.vue
@@ -227,6 +227,7 @@
{
- this.loading = false;
- this.more = childrenResponse.more || null;
- },
- );
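+      // Remove the trash root's existing children before reloading them, so the list is rebuilt from a clean slate.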
+ this.removeContentNodes({ parentId: this.trashId }).then(() => {
+ this.loadChildren({ parent: this.trashId, ordering: '-modified' }).then(
+ childrenResponse => {
+ this.loading = false;
+ this.more = childrenResponse.more || null;
+ },
+ );
+ });
},
moveNodes(target) {
return this.moveContentNodes({
diff --git a/contentcuration/contentcuration/frontend/shared/data/__tests__/ContentNodeResource.spec.js b/contentcuration/contentcuration/frontend/shared/data/__tests__/ContentNodeResource.spec.js
index 3f16719252..31bcb53bb4 100644
--- a/contentcuration/contentcuration/frontend/shared/data/__tests__/ContentNodeResource.spec.js
+++ b/contentcuration/contentcuration/frontend/shared/data/__tests__/ContentNodeResource.spec.js
@@ -574,8 +574,12 @@ describe('ContentNode methods', () => {
it('should update the node with the payload', async () => {
node.parent = parent.id;
- await expect(ContentNode.tableMove({ node, parent, payload, change })).resolves.toBe(payload);
- expect(table.update).toHaveBeenCalledWith(node.id, payload);
+ const result = await ContentNode.tableMove({ node, parent, payload, change });
+ expect(result).toMatchObject({ ...payload, modified: expect.any(String) });
+ expect(table.update).toHaveBeenCalledTimes(1);
+ const [updateId, updatePayload] = table.update.mock.calls[0];
+ expect(updateId).toBe(node.id);
+ expect(updatePayload).toBe(result);
expect(table.put).not.toBeCalled();
expect(table.update).not.toHaveBeenCalledWith(node.parent, { changed: true });
});
@@ -584,19 +588,23 @@ describe('ContentNode methods', () => {
node.parent = parent.id;
updated = false;
const newPayload = { ...payload, root_id: parent.root_id };
- await expect(ContentNode.tableMove({ node, parent, payload, change })).resolves.toMatchObject(
- newPayload,
+ const result = await ContentNode.tableMove({ node, parent, payload, change });
+ expect(result).toMatchObject({ ...newPayload, modified: expect.any(String) });
+ expect(table.update).toHaveBeenCalledWith(
+ node.id,
+ expect.objectContaining({ ...payload, modified: expect.any(String) }),
);
- expect(table.update).toHaveBeenCalledWith(node.id, payload);
- expect(table.put).toHaveBeenCalledWith(newPayload);
+ expect(table.put).toHaveBeenCalledWith(result);
expect(table.update).not.toHaveBeenCalledWith(node.parent, { changed: true });
});
it('should mark the old parent as changed', async () => {
- await expect(ContentNode.tableMove({ node, parent, payload, change })).resolves.toMatchObject(
- payload,
+ const result = await ContentNode.tableMove({ node, parent, payload, change });
+ expect(result).toMatchObject({ ...payload, modified: expect.any(String) });
+ expect(table.update).toHaveBeenCalledWith(
+ node.id,
+ expect.objectContaining({ ...payload, modified: expect.any(String) }),
);
- expect(table.update).toHaveBeenCalledWith(node.id, payload);
expect(table.put).not.toBeCalled();
expect(table.update).toHaveBeenCalledWith(node.parent, { changed: true });
});
diff --git a/contentcuration/contentcuration/frontend/shared/data/resources.js b/contentcuration/contentcuration/frontend/shared/data/resources.js
index 37c307d850..1ddc0cdbfe 100644
--- a/contentcuration/contentcuration/frontend/shared/data/resources.js
+++ b/contentcuration/contentcuration/frontend/shared/data/resources.js
@@ -1687,6 +1687,10 @@ export const ContentNode = new TreeResource({
async tableMove({ node, parent, payload }) {
// Do direct table writes here rather than using add/update methods to avoid
// creating unnecessary additional change events.
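+      // Stamp the payload with a fresh ISO timestamp so the node's modified field reflects the move.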
+ payload = {
+ ...payload,
+ modified: new Date().toISOString(),
+ };
const updated = await this.table.update(node.id, payload);
// Update didn't succeed, this node probably doesn't exist, do a put instead,
// but need to add in other parent info.
diff --git a/contentcuration/contentcuration/migrations/0155_fix_language_foreign_key_length.py b/contentcuration/contentcuration/migrations/0155_fix_language_foreign_key_length.py
new file mode 100644
index 0000000000..28824ed21e
--- /dev/null
+++ b/contentcuration/contentcuration/migrations/0155_fix_language_foreign_key_length.py
@@ -0,0 +1,47 @@
+# Generated manually to fix Language foreign key column length in M2M junction table
+# See https://github.com/learningequality/studio/issues/5618
+#
+# When Language.id was changed from max_length=7 to max_length=14 in migration
+# 0081, Django 1.9 did not cascade the primary key column size change to the
+# many-to-many junction table column. This migration fixes that column for
+# databases that were created before the migration squash.
+#
+# This migration is idempotent - it only alters the column if it is still varchar(7).
+from django.db import migrations
+
+
+# SQL to fix the column, checking if it needs to be altered first
+FORWARD_SQL = """
+DO $$
+BEGIN
+ -- Fix contentcuration_channel_included_languages.language_id (M2M junction table)
+ IF EXISTS (
+ SELECT 1 FROM information_schema.columns
+ WHERE table_schema = 'public'
+ AND table_name = 'contentcuration_channel_included_languages'
+ AND column_name = 'language_id'
+ AND character_maximum_length = 7
+ ) THEN
+ ALTER TABLE contentcuration_channel_included_languages
+ ALTER COLUMN language_id TYPE varchar(14);
+ END IF;
+END $$;
+"""
+
+# Reverse SQL is a no-op since we don't want to shrink the column back
+# (that could cause data loss if longer language codes have already been inserted)
+REVERSE_SQL = """
+-- No-op: Cannot safely reverse this migration as it may cause data loss
+SELECT 1;
+"""
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("contentcuration", "0154_alter_assessmentitem_type"),
+ ]
+
+ operations = [
+ migrations.RunSQL(FORWARD_SQL, REVERSE_SQL),
+ ]
diff --git a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py
index c2d94744e0..a3f15770cd 100644
--- a/contentcuration/contentcuration/models.py
+++ b/contentcuration/contentcuration/models.py
@@ -54,6 +54,7 @@
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import languages
+from le_utils.constants import modalities
from le_utils.constants import roles
from model_utils import FieldTracker
from mptt.models import MPTTModel
@@ -2290,22 +2291,23 @@ def mark_complete(self): # noqa C901
)
if not (self.extra_fields.get("mastery_model") or criterion):
errors.append("Missing mastery criterion")
- if criterion:
- try:
- completion_criteria.validate(
- criterion, kind=content_kinds.EXERCISE
- )
- except completion_criteria.ValidationError:
- errors.append("Mastery criterion is defined but is invalid")
- else:
- criterion = self.extra_fields and self.extra_fields.get(
- "options", {}
- ).get("completion_criteria", {})
- if criterion:
- try:
- completion_criteria.validate(criterion, kind=self.kind_id)
- except completion_criteria.ValidationError:
- errors.append("Completion criterion is defined but is invalid")
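+            # Read completion criteria and modality from extra_fields["options"] for every content kind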
+ options = self.extra_fields and self.extra_fields.get("options", {}) or {}
+ criterion = options.get("completion_criteria", {})
+ modality = options.get("modality")
+ # UNIT modality topics must have completion criteria
+ if (
+ self.kind_id == content_kinds.TOPIC
+ and modality == modalities.UNIT
+ and not criterion
+ ):
+ errors.append("UNIT modality topics must have completion criteria")
+ if criterion:
+ try:
+ completion_criteria.validate(
+ criterion, kind=self.kind_id, modality=modality
+ )
+ except completion_criteria.ValidationError:
+ errors.append("Completion criterion is defined but is invalid")
self.complete = not errors
return errors
diff --git a/contentcuration/contentcuration/settings.py b/contentcuration/contentcuration/settings.py
index 0f18ed0131..285e7bef76 100644
--- a/contentcuration/contentcuration/settings.py
+++ b/contentcuration/contentcuration/settings.py
@@ -348,6 +348,11 @@ def gettext(s):
"result_serializer": "json",
"result_extended": True,
"worker_send_task_events": True,
+ # Graceful shutdown: allow 28 seconds for tasks to complete before forced termination
+ # This is 2 seconds less than Kubernetes terminationGracePeriodSeconds (30s)
+ "worker_soft_shutdown_timeout": int(
+ os.getenv("CELERY_WORKER_SOFT_SHUTDOWN_TIMEOUT", "28")
+ ),
}
# When cleaning up orphan nodes, only clean up any that have been last modified
diff --git a/contentcuration/contentcuration/tests/test_completion_criteria.py b/contentcuration/contentcuration/tests/test_completion_criteria.py
index a0daec10d7..09cb1529fc 100644
--- a/contentcuration/contentcuration/tests/test_completion_criteria.py
+++ b/contentcuration/contentcuration/tests/test_completion_criteria.py
@@ -2,7 +2,9 @@
from django.test import SimpleTestCase
from le_utils.constants import completion_criteria
from le_utils.constants import content_kinds
+from le_utils.constants import exercises
from le_utils.constants import mastery_criteria
+from le_utils.constants import modalities
from contentcuration.constants.completion_criteria import validate
@@ -40,3 +42,74 @@ def test_validate__content_kind(self):
},
kind=content_kinds.DOCUMENT,
)
+
+ def _make_preposttest_threshold(self):
+ """Helper to create a valid pre_post_test threshold structure."""
+ # UUIDs must be 32 hex characters
+ uuid_a = "a" * 32
+ uuid_b = "b" * 32
+ return {
+ "mastery_model": exercises.PRE_POST_TEST,
+ "pre_post_test": {
+ "assessment_item_ids": [uuid_a, uuid_b],
+ "version_a_item_ids": [uuid_a],
+ "version_b_item_ids": [uuid_b],
+ },
+ }
+
+ def test_validate__topic_with_unit_modality_and_preposttest__success(self):
+ """Topic with UNIT modality and PRE_POST_TEST mastery model should pass validation."""
+ validate(
+ {
+ "model": completion_criteria.MASTERY,
+ "threshold": self._make_preposttest_threshold(),
+ },
+ kind=content_kinds.TOPIC,
+ modality=modalities.UNIT,
+ )
+
+ def test_validate__topic_with_unit_modality_and_wrong_mastery_model__fail(self):
+ """Topic with UNIT modality but non-PRE_POST_TEST mastery model should fail."""
+ with self.assertRaisesRegex(
+ ValidationError, "mastery_model.*invalid for.*topic"
+ ):
+ validate(
+ {
+ "model": completion_criteria.MASTERY,
+ "threshold": {
+ "mastery_model": mastery_criteria.M_OF_N,
+ "m": 3,
+ "n": 5,
+ },
+ },
+ kind=content_kinds.TOPIC,
+ modality=modalities.UNIT,
+ )
+
+ def test_validate__topic_with_non_unit_modality_and_completion_criteria__fail(self):
+ """Topic with non-UNIT modality (e.g., LESSON) should not have completion criteria."""
+ with self.assertRaisesRegex(
+ ValidationError, "only.*completion criteria.*UNIT modality"
+ ):
+ validate(
+ {
+ "model": completion_criteria.MASTERY,
+ "threshold": self._make_preposttest_threshold(),
+ },
+ kind=content_kinds.TOPIC,
+ modality=modalities.LESSON,
+ )
+
+ def test_validate__topic_with_no_modality_and_completion_criteria__fail(self):
+ """Topic with no modality should not have completion criteria."""
+ with self.assertRaisesRegex(
+ ValidationError, "only.*completion criteria.*UNIT modality"
+ ):
+ validate(
+ {
+ "model": completion_criteria.MASTERY,
+ "threshold": self._make_preposttest_threshold(),
+ },
+ kind=content_kinds.TOPIC,
+ modality=None,
+ )
diff --git a/contentcuration/contentcuration/tests/test_contentnodes.py b/contentcuration/contentcuration/tests/test_contentnodes.py
index bc4f73b0b9..5ce8b472a4 100644
--- a/contentcuration/contentcuration/tests/test_contentnodes.py
+++ b/contentcuration/contentcuration/tests/test_contentnodes.py
@@ -10,6 +10,7 @@
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import format_presets
+from le_utils.constants import modalities
from mixer.backend.django import mixer
from mock import patch
@@ -1481,3 +1482,109 @@ def test_create_video_null_extra_fields(self):
new_obj.mark_complete()
except AttributeError:
self.fail("Null extra_fields not handled")
+
+ def _make_preposttest_extra_fields(self, modality):
+ """Helper to create extra_fields with valid pre_post_test completion criteria."""
+ uuid_a = "a" * 32
+ uuid_b = "b" * 32
+ return {
+ "options": {
+ "modality": modality,
+ "completion_criteria": {
+ "model": completion_criteria.MASTERY,
+ "threshold": {
+ "mastery_model": exercises.PRE_POST_TEST,
+ "pre_post_test": {
+ "assessment_item_ids": [uuid_a, uuid_b],
+ "version_a_item_ids": [uuid_a],
+ "version_b_item_ids": [uuid_b],
+ },
+ },
+ },
+ }
+ }
+
+ def test_create_topic_unit_modality_valid_preposttest_complete(self):
+ """Topic with UNIT modality and valid PRE_POST_TEST completion criteria should be complete."""
+ channel = testdata.channel()
+ new_obj = ContentNode(
+ title="Unit Topic",
+ kind_id=content_kinds.TOPIC,
+ parent=channel.main_tree,
+ extra_fields=self._make_preposttest_extra_fields(modalities.UNIT),
+ )
+ new_obj.save()
+ new_obj.mark_complete()
+ self.assertTrue(new_obj.complete)
+
+ def test_create_topic_unit_modality_wrong_mastery_model_incomplete(self):
+ """Topic with UNIT modality but M_OF_N mastery model should be incomplete."""
+ channel = testdata.channel()
+ new_obj = ContentNode(
+ title="Unit Topic",
+ kind_id=content_kinds.TOPIC,
+ parent=channel.main_tree,
+ extra_fields={
+ "options": {
+ "modality": modalities.UNIT,
+ "completion_criteria": {
+ "model": completion_criteria.MASTERY,
+ "threshold": {
+ "mastery_model": exercises.M_OF_N,
+ "m": 3,
+ "n": 5,
+ },
+ },
+ }
+ },
+ )
+ new_obj.save()
+ new_obj.mark_complete()
+ self.assertFalse(new_obj.complete)
+
+ def test_create_topic_lesson_modality_with_completion_criteria_incomplete(self):
+ """Topic with LESSON modality should not have completion criteria."""
+ channel = testdata.channel()
+ new_obj = ContentNode(
+ title="Lesson Topic",
+ kind_id=content_kinds.TOPIC,
+ parent=channel.main_tree,
+ extra_fields=self._make_preposttest_extra_fields(modalities.LESSON),
+ )
+ new_obj.save()
+ new_obj.mark_complete()
+ self.assertFalse(new_obj.complete)
+
+ def test_create_topic_no_modality_with_completion_criteria_incomplete(self):
+ """Topic with no modality should not have completion criteria."""
+ channel = testdata.channel()
+ extra_fields = self._make_preposttest_extra_fields(modalities.UNIT)
+ # Remove the modality
+ del extra_fields["options"]["modality"]
+ new_obj = ContentNode(
+ title="Topic Without Modality",
+ kind_id=content_kinds.TOPIC,
+ parent=channel.main_tree,
+ extra_fields=extra_fields,
+ )
+ new_obj.save()
+ new_obj.mark_complete()
+ self.assertFalse(new_obj.complete)
+
+ def test_create_topic_unit_modality_without_completion_criteria_incomplete(self):
+ """Topic with UNIT modality MUST have completion criteria - it's not optional."""
+ channel = testdata.channel()
+ new_obj = ContentNode(
+ title="Unit Topic Without Criteria",
+ kind_id=content_kinds.TOPIC,
+ parent=channel.main_tree,
+ extra_fields={
+ "options": {
+ "modality": modalities.UNIT,
+ # No completion_criteria
+ }
+ },
+ )
+ new_obj.save()
+ new_obj.mark_complete()
+ self.assertFalse(new_obj.complete)
diff --git a/contentcuration/contentcuration/tests/test_exportchannel.py b/contentcuration/contentcuration/tests/test_exportchannel.py
index 5c850597d7..b87b310344 100644
--- a/contentcuration/contentcuration/tests/test_exportchannel.py
+++ b/contentcuration/contentcuration/tests/test_exportchannel.py
@@ -16,6 +16,7 @@
from kolibri_content.router import set_active_content_database
from le_utils.constants import exercises
from le_utils.constants import format_presets
+from le_utils.constants import modalities
from le_utils.constants.labels import accessibility_categories
from le_utils.constants.labels import learning_activities
from le_utils.constants.labels import levels
@@ -304,6 +305,83 @@ def setUp(self):
}
first_topic_first_child.save()
+ # Add a UNIT topic with directly attached assessment items
+ unit_assessment_id_1 = uuid.uuid4().hex
+ unit_assessment_id_2 = uuid.uuid4().hex
+
+ unit_topic = create_node(
+ {"kind_id": "topic", "title": "Test Unit Topic", "children": []},
+ parent=self.content_channel.main_tree,
+ )
+ unit_topic.extra_fields = {
+ "options": {
+ "modality": modalities.UNIT,
+ "completion_criteria": {
+ "model": "mastery",
+ "threshold": {
+ "mastery_model": exercises.PRE_POST_TEST,
+ "pre_post_test": {
+ "assessment_item_ids": [
+ unit_assessment_id_1,
+ unit_assessment_id_2,
+ ],
+ "version_a_item_ids": [unit_assessment_id_1],
+ "version_b_item_ids": [unit_assessment_id_2],
+ },
+ },
+ },
+ }
+ }
+ unit_topic.save()
+
+ cc.AssessmentItem.objects.create(
+ contentnode=unit_topic,
+ assessment_id=unit_assessment_id_1,
+ type=exercises.SINGLE_SELECTION,
+ question="What is 2+2?",
+ answers=json.dumps(
+ [
+ {"answer": "4", "correct": True, "order": 1},
+ {"answer": "3", "correct": False, "order": 2},
+ ]
+ ),
+ hints=json.dumps([]),
+ raw_data="{}",
+ order=1,
+ randomize=False,
+ )
+
+ cc.AssessmentItem.objects.create(
+ contentnode=unit_topic,
+ assessment_id=unit_assessment_id_2,
+ type=exercises.SINGLE_SELECTION,
+ question="What is 3+3?",
+ answers=json.dumps(
+ [
+ {"answer": "6", "correct": True, "order": 1},
+ {"answer": "5", "correct": False, "order": 2},
+ ]
+ ),
+ hints=json.dumps([]),
+ raw_data="{}",
+ order=2,
+ randomize=False,
+ )
+
+ # Add a LESSON child topic under the UNIT with a video child
+ lesson_topic = create_node(
+ {
+ "kind_id": "topic",
+ "title": "Test Lesson Topic",
+ "children": [
+ {"kind_id": "video", "title": "Unit Lesson Video", "children": []},
+ ],
+ },
+ parent=unit_topic,
+ )
+ lesson_topic.extra_fields = {"options": {"modality": modalities.LESSON}}
+ lesson_topic.save()
+
set_channel_icon_encoding(self.content_channel)
self.tempdb = create_content_database(
self.content_channel, True, self.admin_user.id, True
@@ -348,6 +426,10 @@ def test_contentnode_incomplete_not_published(self):
assert incomplete_nodes.count() > 0
for node in complete_nodes:
+            # Skip nodes that are known to fail publish-time validation and therefore are not published:
+            # - the "Bad mastery test" exercise has no mastery model (checked separately below)
+ if node.title == "Bad mastery test":
+ continue
# if a parent node is incomplete, this node is excluded as well.
if node.get_ancestors().filter(complete=False).count() == 0:
assert kolibri_nodes.filter(pk=node.node_id).count() == 1
@@ -642,6 +724,30 @@ def test_qti_archive_contains_manifest_and_assessment_ids(self):
for i, ai in enumerate(qti_exercise.assessment_items.order_by("order")):
self.assertEqual(assessment_ids[i], hex_to_qti_id(ai.assessment_id))
+ def test_unit_topic_publishes_with_exercise_zip(self):
+ """Test that a TOPIC node with UNIT modality gets its directly
+ attached assessment items compiled into a zip file during publishing."""
+ unit_topic = cc.ContentNode.objects.get(title="Test Unit Topic")
+
+ # Assert UNIT topic has exercise file in Studio
+ unit_files = cc.File.objects.filter(
+ contentnode=unit_topic,
+ preset_id=format_presets.EXERCISE,
+ )
+ self.assertEqual(
+ unit_files.count(),
+ 1,
+ "UNIT topic should have exactly one exercise archive file",
+ )
+
+ # Assert NO assessment metadata in Kolibri export for UNIT topics
+ # UNIT topics store assessment config in options/completion_criteria instead
+ published_unit = kolibri_models.ContentNode.objects.get(title="Test Unit Topic")
+ self.assertFalse(
+ published_unit.assessmentmetadata.exists(),
+ "UNIT topic should NOT have assessment metadata",
+ )
+
class EmptyChannelTestCase(StudioTestCase):
@classmethod
diff --git a/contentcuration/contentcuration/tests/test_language_fk_column_length_migration.py b/contentcuration/contentcuration/tests/test_language_fk_column_length_migration.py
new file mode 100644
index 0000000000..625a1fd10c
--- /dev/null
+++ b/contentcuration/contentcuration/tests/test_language_fk_column_length_migration.py
@@ -0,0 +1,73 @@
+"""
+Test for migration 0155_fix_language_foreign_key_length.
+
+This test verifies that the migration correctly fixes the Language foreign key
+column in the included_languages M2M junction table from varchar(7) to varchar(14).
+"""
+from django.db import connection
+from django.db.migrations.executor import MigrationExecutor
+from django.test import TransactionTestCase
+
+
+# The M2M junction table column that should be fixed by the migration
+TABLE_NAME = "contentcuration_channel_included_languages"
+COLUMN_NAME = "language_id"
+
+
+def get_column_max_length(table_name, column_name):
+ """Get the character_maximum_length for a varchar column."""
+ with connection.cursor() as cursor:
+ cursor.execute(
+ """
+ SELECT character_maximum_length
+ FROM information_schema.columns
+ WHERE table_schema = 'public'
+ AND table_name = %s
+ AND column_name = %s
+ """,
+ [table_name, column_name],
+ )
+ row = cursor.fetchone()
+ return row[0] if row else None
+
+
+def set_column_to_varchar7(table_name, column_name):
+ """Shrink a varchar column to varchar(7) to simulate bad production state."""
+ with connection.cursor() as cursor:
+ cursor.execute(
+ f"ALTER TABLE {table_name} ALTER COLUMN {column_name} TYPE varchar(7)"
+ )
+
+
+class TestLanguageForeignKeyLengthMigration(TransactionTestCase):
+ """
+ Test that migration 0155 fixes varchar(7) Language FK column to varchar(14).
+
+ This simulates the production database state where Language.id was changed
+ from max_length=7 to max_length=14, but Django 1.9 didn't cascade the change
+ to the M2M junction table column.
+ """
+
+ def test_migration_fixes_varchar7_column(self):
+ # First, shrink column back to varchar(7) to simulate bad state
+ set_column_to_varchar7(TABLE_NAME, COLUMN_NAME)
+ # Verify the column is now varchar(7)
+ self.assertEqual(
+ get_column_max_length(TABLE_NAME, COLUMN_NAME),
+ 7,
+ f"{TABLE_NAME}.{COLUMN_NAME} should be varchar(7) before migration",
+ )
+
+        # Roll back to 0154 (the reverse of 0155 is a no-op), then rebuild the graph and re-apply 0155
+ executor = MigrationExecutor(connection)
+ executor.migrate([("contentcuration", "0154_alter_assessmentitem_type")])
+ executor = MigrationExecutor(connection)
+ executor.loader.build_graph()
+ executor.migrate([("contentcuration", "0155_fix_language_foreign_key_length")])
+
+ # Verify column is now varchar(14)
+ self.assertEqual(
+ get_column_max_length(TABLE_NAME, COLUMN_NAME),
+ 14,
+ f"{TABLE_NAME}.{COLUMN_NAME} should be varchar(14) after migration",
+ )
diff --git a/contentcuration/contentcuration/tests/test_sync.py b/contentcuration/contentcuration/tests/test_sync.py
index 8d011cc1db..73abc842c0 100644
--- a/contentcuration/contentcuration/tests/test_sync.py
+++ b/contentcuration/contentcuration/tests/test_sync.py
@@ -4,6 +4,7 @@
from le_utils.constants import content_kinds
from le_utils.constants import file_formats
from le_utils.constants import format_presets
+from le_utils.constants import roles
from le_utils.constants.labels import accessibility_categories
from le_utils.constants.labels import learning_activities
from le_utils.constants.labels import levels
@@ -17,6 +18,7 @@
from contentcuration.models import Channel
from contentcuration.models import ContentTag
from contentcuration.models import File
+from contentcuration.models import Language
from contentcuration.models import License
from contentcuration.tests import testdata
from contentcuration.tests.base import StudioAPITestCase
@@ -108,7 +110,6 @@ def test_sync_files_add(self):
if child.title == contentnode.title:
target_child = child
break
- self.assertIsNotNone(target_child)
self.assertEqual(target_child.files.count(), contentnode.files.count())
db_file = self._add_temp_file_to_content_node(contentnode)
@@ -172,7 +173,6 @@ def test_sync_assessment_item_add(self):
source_node_id=contentnode.node_id
)
- self.assertIsNotNone(target_child)
self.assertEqual(
target_child.assessment_items.count(), contentnode.assessment_items.count()
)
@@ -224,7 +224,6 @@ def test_sync_tags_add(self):
source_node_id=contentnode.node_id
)
- self.assertIsNotNone(target_child)
self.assertEqual(target_child.tags.count(), contentnode.tags.count())
tag = ContentTag.objects.create(tag_name="tagname")
@@ -263,7 +262,6 @@ def test_sync_tags_add_multiple_tags(self):
source_node_id=contentnode.node_id
)
- self.assertIsNotNone(target_child)
self.assertEqual(target_child.tags.count(), contentnode.tags.count())
# Create the same tag twice
@@ -314,8 +312,6 @@ def test_sync_channel_titles_and_descriptions(self):
source_node_id=contentnode.node_id
)
- self.assertIsNotNone(target_child)
-
for key, value in labels.items():
setattr(contentnode, key, value)
contentnode.save()
@@ -407,8 +403,6 @@ def test_sync_channel_other_metadata_labels(self):
source_node_id=contentnode.node_id
)
- self.assertIsNotNone(target_child)
-
for key, value in labels.items():
setattr(contentnode, key, {value: True})
contentnode.save()
@@ -429,6 +423,239 @@ def test_sync_channel_other_metadata_labels(self):
for key, value in labels.items():
self.assertEqual(getattr(target_child, key), {value: True})
+ def test_sync_language_field(self):
+ """
+ Test that the language field is synced correctly when sync_resource_details is True.
+ """
+ self.assertFalse(self.channel.has_changes())
+ self.assertFalse(self.derivative_channel.has_changes())
+
+ contentnode = (
+ self.channel.main_tree.get_descendants()
+ .exclude(kind_id=content_kinds.TOPIC)
+ .first()
+ )
+
+ target_child = self.derivative_channel.main_tree.get_descendants().get(
+ source_node_id=contentnode.node_id
+ )
+
+ # Set a different language on the original node
+ spanish_language = Language.objects.get(id="es")
+ contentnode.language = spanish_language
+ contentnode.save()
+
+ sync_channel(
+ self.derivative_channel,
+ sync_titles_and_descriptions=False,
+ sync_resource_details=True,
+ sync_files=False,
+ sync_assessment_items=False,
+ )
+
+ self.assertTrue(self.channel.has_changes())
+ self.assertTrue(self.derivative_channel.has_changes())
+
+ target_child.refresh_from_db()
+ self.assertEqual(target_child.language, spanish_language)
+
+ def test_sync_provider_field(self):
+ """
+ Test that the provider field is synced correctly when sync_resource_details is True.
+ """
+ self.assertFalse(self.channel.has_changes())
+ self.assertFalse(self.derivative_channel.has_changes())
+
+ contentnode = (
+ self.channel.main_tree.get_descendants()
+ .exclude(kind_id=content_kinds.TOPIC)
+ .first()
+ )
+
+ target_child = self.derivative_channel.main_tree.get_descendants().get(
+ source_node_id=contentnode.node_id
+ )
+
+ # Set a provider on the original node
+ contentnode.provider = "Test Provider Organization"
+ contentnode.save()
+
+ sync_channel(
+ self.derivative_channel,
+ sync_titles_and_descriptions=False,
+ sync_resource_details=True,
+ sync_files=False,
+ sync_assessment_items=False,
+ )
+
+ self.assertTrue(self.channel.has_changes())
+ self.assertTrue(self.derivative_channel.has_changes())
+
+ target_child.refresh_from_db()
+ self.assertEqual(target_child.provider, "Test Provider Organization")
+
+ def test_sync_aggregator_field(self):
+ """
+ Test that the aggregator field is synced correctly when sync_resource_details is True.
+ """
+ self.assertFalse(self.channel.has_changes())
+ self.assertFalse(self.derivative_channel.has_changes())
+
+ contentnode = (
+ self.channel.main_tree.get_descendants()
+ .exclude(kind_id=content_kinds.TOPIC)
+ .first()
+ )
+
+ target_child = self.derivative_channel.main_tree.get_descendants().get(
+ source_node_id=contentnode.node_id
+ )
+
+ # Set an aggregator on the original node
+ contentnode.aggregator = "Test Aggregator Organization"
+ contentnode.save()
+
+ sync_channel(
+ self.derivative_channel,
+ sync_titles_and_descriptions=False,
+ sync_resource_details=True,
+ sync_files=False,
+ sync_assessment_items=False,
+ )
+
+ self.assertTrue(self.channel.has_changes())
+ self.assertTrue(self.derivative_channel.has_changes())
+
+ target_child.refresh_from_db()
+ self.assertEqual(target_child.aggregator, "Test Aggregator Organization")
+
+ def test_sync_role_visibility_field(self):
+ """
+ Test that the role_visibility field is synced correctly when sync_resource_details is True.
+ """
+ self.assertFalse(self.channel.has_changes())
+ self.assertFalse(self.derivative_channel.has_changes())
+
+ contentnode = (
+ self.channel.main_tree.get_descendants()
+ .exclude(kind_id=content_kinds.TOPIC)
+ .first()
+ )
+
+ target_child = self.derivative_channel.main_tree.get_descendants().get(
+ source_node_id=contentnode.node_id
+ )
+
+ # Set role_visibility to COACH on the original node
+ contentnode.role_visibility = roles.COACH
+ contentnode.save()
+
+ sync_channel(
+ self.derivative_channel,
+ sync_titles_and_descriptions=False,
+ sync_resource_details=True,
+ sync_files=False,
+ sync_assessment_items=False,
+ )
+
+ self.assertTrue(self.channel.has_changes())
+ self.assertTrue(self.derivative_channel.has_changes())
+
+ target_child.refresh_from_db()
+ self.assertEqual(target_child.role_visibility, roles.COACH)
+
+ def test_sync_all_missing_fields(self):
+ """
+ Test that all four previously missing fields (language, provider, aggregator,
+ role_visibility) are synced together when sync_resource_details is True.
+ """
+ self.assertFalse(self.channel.has_changes())
+ self.assertFalse(self.derivative_channel.has_changes())
+
+ contentnode = (
+ self.channel.main_tree.get_descendants()
+ .exclude(kind_id=content_kinds.TOPIC)
+ .first()
+ )
+
+ target_child = self.derivative_channel.main_tree.get_descendants().get(
+ source_node_id=contentnode.node_id
+ )
+
+ # Set all four fields on the original node
+ french_language = Language.objects.get(id="fr")
+ contentnode.language = french_language
+ contentnode.provider = "Comprehensive Test Provider"
+ contentnode.aggregator = "Comprehensive Test Aggregator"
+ contentnode.role_visibility = roles.COACH
+ contentnode.save()
+
+ sync_channel(
+ self.derivative_channel,
+ sync_titles_and_descriptions=False,
+ sync_resource_details=True,
+ sync_files=False,
+ sync_assessment_items=False,
+ )
+
+ self.assertTrue(self.channel.has_changes())
+ self.assertTrue(self.derivative_channel.has_changes())
+
+ target_child.refresh_from_db()
+ self.assertEqual(target_child.language, french_language)
+ self.assertEqual(target_child.provider, "Comprehensive Test Provider")
+ self.assertEqual(target_child.aggregator, "Comprehensive Test Aggregator")
+ self.assertEqual(target_child.role_visibility, roles.COACH)
+
+ def test_sync_missing_fields_not_synced_without_flag(self):
+ """
+ Test that the four fields (language, provider, aggregator, role_visibility)
+ are NOT synced when sync_resource_details is False.
+ """
+ self.assertFalse(self.channel.has_changes())
+ self.assertFalse(self.derivative_channel.has_changes())
+
+ contentnode = (
+ self.channel.main_tree.get_descendants()
+ .exclude(kind_id=content_kinds.TOPIC)
+ .first()
+ )
+
+ target_child = self.derivative_channel.main_tree.get_descendants().get(
+ source_node_id=contentnode.node_id
+ )
+
+ # Store original values
+ original_language = target_child.language
+ original_provider = target_child.provider
+ original_aggregator = target_child.aggregator
+ original_role_visibility = target_child.role_visibility
+
+ # Modify all four fields in the original node
+ german_language = Language.objects.get(id="de")
+ contentnode.language = german_language
+ contentnode.provider = "Should Not Sync Provider"
+ contentnode.aggregator = "Should Not Sync Aggregator"
+ contentnode.role_visibility = roles.COACH
+ contentnode.save()
+
+ # Sync WITHOUT sync_resource_details
+ sync_channel(
+ self.derivative_channel,
+ sync_titles_and_descriptions=False,
+ sync_resource_details=False,
+ sync_files=False,
+ sync_assessment_items=False,
+ )
+
+ target_child.refresh_from_db()
+
+ # Verify fields remain unchanged
+ self.assertEqual(target_child.language, original_language)
+ self.assertEqual(target_child.provider, original_provider)
+ self.assertEqual(target_child.aggregator, original_aggregator)
+ self.assertEqual(target_child.role_visibility, original_role_visibility)
+
class ContentIDTestCase(SyncTestMixin, StudioAPITestCase):
def setUp(self):
diff --git a/contentcuration/contentcuration/tests/views/test_users.py b/contentcuration/contentcuration/tests/views/test_users.py
index a17da93f8a..4c5f635204 100644
--- a/contentcuration/contentcuration/tests/views/test_users.py
+++ b/contentcuration/contentcuration/tests/views/test_users.py
@@ -1,11 +1,13 @@
import json
+from django.db import IntegrityError
from django.http.response import HttpResponseBadRequest
from django.http.response import HttpResponseForbidden
from django.http.response import HttpResponseNotAllowed
from django.http.response import HttpResponseRedirectBase
from django.urls import reverse_lazy
from mock import mock
+from mock import patch
from contentcuration.models import User
from contentcuration.tests import testdata
@@ -127,8 +129,8 @@ def setUp(self):
first_name="Tester",
last_name="Tester",
email="tester@tester.com",
- pasword1="tester123",
- pasword2="tester123",
+ password1="tester123",
+ password2="tester123",
uses="IDK",
source="IDK",
policies=json.dumps(dict(policy_etc=True)),
@@ -148,8 +150,8 @@ def test_post__inactive_registration(self):
self.assertIsInstance(response, HttpResponseNotAllowed)
def test_post__password_too_short(self):
- self.request_data["pasword1"] = "123"
- self.request_data["pasword2"] = "123"
+ self.request_data["password1"] = "123"
+ self.request_data["password2"] = "123"
response = self.post(self.view, self.request_data)
self.assertIsInstance(response, HttpResponseBadRequest)
self.assertIn("password1", response.content.decode())
@@ -160,6 +162,22 @@ def test_post__after_delete(self):
response = self.post(self.view, self.request_data)
self.assertIsInstance(response, HttpResponseForbidden)
+ @patch("contentcuration.views.users.UserRegistrationView.register")
+ def test_post__handles_integrity_error_gracefully(self, mock_register):
+ """Test that IntegrityError during registration returns 403 instead of 500"""
+ # Simulate IntegrityError (race condition on duplicate email)
+ mock_register.side_effect = IntegrityError(
+ 'duplicate key value violates unique constraint "contentcuration_user_email_key"'
+ )
+
+ response = self.post(self.view, self.request_data)
+
+ # Should return 403 Forbidden, not 500
+ self.assertIsInstance(response, HttpResponseForbidden)
+ # Error response should include "email" field
+ error_data = json.loads(response.content.decode())
+ self.assertIn("email", error_data)
+
class UserActivationViewTestCase(StudioAPITestCase):
def setUp(self):
diff --git a/contentcuration/contentcuration/utils/publish.py b/contentcuration/contentcuration/utils/publish.py
index 3e28f2d0e0..9e9e190105 100644
--- a/contentcuration/contentcuration/utils/publish.py
+++ b/contentcuration/contentcuration/utils/publish.py
@@ -35,6 +35,7 @@
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
+from le_utils.constants import modalities
from le_utils.constants import roles
from search.models import ChannelFullTextSearch
from search.models import ContentNodeFullTextSearch
@@ -227,6 +228,22 @@ def assign_license_to_contentcuration_nodes(channel, license):
]
+def has_assessments(node):
+ """Check if a node should have its assessment items published.
+
+ Returns True for EXERCISE nodes and TOPIC nodes with UNIT modality
+ that have assessment items.
+ """
+ if node.kind_id == content_kinds.EXERCISE:
+ return True
+ if node.kind_id == content_kinds.TOPIC:
+ options = node.extra_fields.get("options", {}) if node.extra_fields else {}
+ if options.get("modality") == modalities.UNIT:
+ # Only return True if the UNIT has assessment items
+ return node.assessment_items.filter(deleted=False).exists()
+ return False
+
+
class TreeMapper:
def __init__(
self,
@@ -296,9 +313,10 @@ def recurse_nodes(self, node, inherited_fields): # noqa C901
# Only process nodes that are either non-topics or have non-topic descendants
if node.is_publishable():
- # early validation to make sure we don't have any exercises without mastery models
- # which should be unlikely when the node is complete, but just in case
- if node.kind_id == content_kinds.EXERCISE:
+ # early validation to make sure we don't have any nodes with assessments
+ # without mastery models, which should be unlikely when the node is complete,
+ # but just in case
+ if has_assessments(node):
try:
# migrates and extracts the mastery model from the exercise
_, mastery_model = parse_assessment_metadata(node)
@@ -306,8 +324,8 @@ def recurse_nodes(self, node, inherited_fields): # noqa C901
raise ValueError("Exercise does not have a mastery model")
except Exception as e:
logging.warning(
- "Unable to parse exercise {id} mastery model: {error}".format(
- id=node.pk, error=str(e)
+ "Unable to parse exercise {id} {title} mastery model: {error}".format(
+ id=node.pk, title=node.title, error=str(e)
)
)
return
@@ -322,7 +340,7 @@ def recurse_nodes(self, node, inherited_fields): # noqa C901
metadata,
)
- if node.kind_id == content_kinds.EXERCISE:
+ if has_assessments(node):
exercise_data = process_assessment_metadata(node)
any_free_response = any(
t == exercises.FREE_RESPONSE
@@ -359,10 +377,16 @@ def recurse_nodes(self, node, inherited_fields): # noqa C901
)
generator.create_exercise_archive()
- create_kolibri_assessment_metadata(node, kolibrinode)
+ # Only create assessment metadata for exercises, not UNIT topics
+ # UNIT topics store their assessment config in options/completion_criteria
+ if node.kind_id == content_kinds.EXERCISE:
+ create_kolibri_assessment_metadata(node, kolibrinode)
elif node.kind_id == content_kinds.SLIDESHOW:
create_slideshow_manifest(node, user_id=self.user_id)
- elif node.kind_id == content_kinds.TOPIC:
+
+ # TOPIC nodes need to recurse into children, including UNIT topics
+ # that also had their assessments processed above
+ if node.kind_id == content_kinds.TOPIC:
for child in node.children.all():
self.recurse_nodes(child, metadata)
create_associated_file_objects(kolibrinode, node)
diff --git a/contentcuration/contentcuration/utils/sync.py b/contentcuration/contentcuration/utils/sync.py
index a11ce4aeab..2987d1c75b 100644
--- a/contentcuration/contentcuration/utils/sync.py
+++ b/contentcuration/contentcuration/utils/sync.py
@@ -71,6 +71,10 @@ def sync_node(
"license_description",
"copyright_holder",
"author",
+ "language",
+ "provider",
+ "aggregator",
+ "role_visibility",
"extra_fields",
"categories",
"learner_needs",
diff --git a/contentcuration/contentcuration/views/internal.py b/contentcuration/contentcuration/views/internal.py
index 93be3e3043..07b4014b00 100644
--- a/contentcuration/contentcuration/views/internal.py
+++ b/contentcuration/contentcuration/views/internal.py
@@ -839,7 +839,9 @@ def create_node(node_data, parent_node, sort_order): # noqa: C901
if "options" in extra_fields and "completion_criteria" in extra_fields["options"]:
try:
completion_criteria.validate(
- extra_fields["options"]["completion_criteria"], kind=node_data["kind"]
+ extra_fields["options"]["completion_criteria"],
+ kind=node_data["kind"],
+ modality=extra_fields["options"].get("modality"),
)
except completion_criteria.ValidationError:
raise NodeValidationError(
diff --git a/contentcuration/contentcuration/views/users.py b/contentcuration/contentcuration/views/users.py
index 66a6652d0b..34a986895b 100644
--- a/contentcuration/contentcuration/views/users.py
+++ b/contentcuration/contentcuration/views/users.py
@@ -11,6 +11,7 @@
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
+from django.db import IntegrityError
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
@@ -181,8 +182,19 @@ def get_form_kwargs(self):
return kwargs
def form_valid(self, form):
- self.register(form)
- return HttpResponse()
+ try:
+ self.register(form)
+ return HttpResponse()
+ except IntegrityError as e:
+ # Handle race condition where duplicate user is created between
+ # form validation and save (e.g., double submit)
+ logger.warning(
+ "IntegrityError during user registration, likely due to race condition: %s",
+ str(e),
+ extra={"email": form.cleaned_data.get("email")},
+ )
+ # Return same error as duplicate active account for consistency
+ return HttpResponseForbidden(json.dumps(["email"]))
def form_invalid(self, form):
# frontend handles the error messages
diff --git a/deploy/cloudprober.cfg b/deploy/cloudprober.cfg
deleted file mode 100644
index c5a129455e..0000000000
--- a/deploy/cloudprober.cfg
+++ /dev/null
@@ -1,187 +0,0 @@
-probe {
- name: "google_homepage"
- type: HTTP
- targets {
- host_names: "www.google.com"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "facebook_homepage"
- type: HTTP
- targets {
- host_names: "www.facebook.com"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "studio_homepage"
- type: HTTP
- targets {
- host_names: "studio.learningequality.org"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "login"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/login_page_probe.py"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "postgres"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/postgres_probe.py"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "workers"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/worker_probe.py"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 5000 # 5s
-}
-
-probe {
- name: "channel_creation"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/channel_creation_probe.py"
- }
- interval_msec: 300000 # 5mins
- timeout_msec: 10000 # 10s
-}
-
-probe {
- name: "channel_update"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/channel_update_probe.py"
- }
- interval_msec: 60000 # 1min
- timeout_msec: 10000 # 10s
-}
-
-probe {
- name: "channel_edit_page"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/channel_edit_page_probe.py"
- }
- interval_msec: 10000 # 10s
- timeout_msec: 10000 # 10s
-}
-
-probe {
- name: "postgres_read_contentnode"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/postgres_read_contentnode_probe.py"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "postgres_write_contentnode"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/postgres_write_contentnode_probe.py"
- }
- interval_msec: 60000 # 60s
- timeout_msec: 1000 # 1s
-}
-
-probe {
- name: "topic_creation"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/topic_creation_probe.py"
- }
- interval_msec: 300000 # 5mins
- timeout_msec: 20000 # 20s
-}
-
-probe {
- name: "postmark_api"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/postmark_api_probe.py"
- }
- interval_msec: 300000 # 5 minutes
- timeout_msec: 5000 # 5s
-}
-
-probe {
- name: "publishing_status"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/publishing_status_probe.py"
- }
- interval_msec: 3600000 # 1 hour
- timeout_msec: 10000 # 10s
-}
-
-probe {
- name: "unapplied_changes_status"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/unapplied_changes_probe.py"
- }
- interval_msec: 1800000 # 30 minutes
- timeout_msec: 20000 # 20s
-}
-
-probe {
- name: "task_queue_status"
- type: EXTERNAL
- targets { dummy_targets {} }
- external_probe {
- mode: ONCE
- command: "./probers/task_queue_probe.py"
- }
- interval_msec: 600000 # 10 minutes
- timeout_msec: 10000 # 10s
-}
-
-# Note: When deploying on GKE, the error logs can be found under GCE VM instance.
diff --git a/deploy/prober-entrypoint.sh b/deploy/prober-entrypoint.sh
deleted file mode 100755
index 323e03cab0..0000000000
--- a/deploy/prober-entrypoint.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-curl -L -o cloudprober.zip https://github.com/google/cloudprober/releases/download/v0.10.2/cloudprober-v0.10.2-linux-x86_64.zip
-unzip -p cloudprober.zip > /bin/cloudprober
-chmod +x /bin/cloudprober
-
-cd deploy/
-cloudprober -logtostderr -config_file cloudprober.cfg
diff --git a/deploy/probers/base.py b/deploy/probers/base.py
deleted file mode 100644
index 7f85a18c16..0000000000
--- a/deploy/probers/base.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import datetime
-import os
-
-import requests
-
-USERNAME = os.getenv("PROBER_STUDIO_USERNAME") or "a@a.com"
-PASSWORD = os.getenv("PROBER_STUDIO_PASSWORD") or "a"
-PRODUCTION_MODE_ON = os.getenv("PROBER_STUDIO_PRODUCTION_MODE_ON") or False
-STUDIO_BASE_URL = os.getenv("PROBER_STUDIO_BASE_URL") or "http://127.0.0.1:8080"
-
-
-class BaseProbe(object):
-
- metric = "STUB_METRIC"
- develop_only = False
- prober_name = "PROBER"
-
- def __init__(self):
- self.session = requests.Session()
- self.session.headers.update(
- {"User-Agent": "Studio-Internal-Prober={}".format(self.prober_name)}
- )
-
- def do_probe(self):
- pass
-
- def _login(self):
- # get our initial csrf
- url = self._construct_studio_url("/en/accounts/")
- r = self.session.get(url)
- r.raise_for_status()
- csrf = self.session.cookies.get("csrftoken")
- formdata = {
- "username": USERNAME,
- "password": PASSWORD,
- }
- headers = {
- "referer": url,
- "X-Studio-Internal-Prober": "LOGIN-PROBER",
- "X-CSRFToken": csrf,
- }
-
- r = self.session.post(
- self._construct_studio_url("/en/accounts/login/"),
- json=formdata,
- headers=headers,
- allow_redirects=False,
- )
- r.raise_for_status()
-
- # Since logging into Studio with correct username and password should redirect, fail otherwise
- if r.status_code != 302:
- raise ProberException("Cannot log into Studio.")
-
- return r
-
- def _construct_studio_url(self, path):
- path_stripped = path.lstrip("/")
- url = "{base_url}/{path}".format(base_url=STUDIO_BASE_URL, path=path_stripped)
- return url
-
- def request(
- self,
- path,
- action="GET",
- data=None,
- headers=None,
- contenttype="application/json",
- ):
- data = data or {}
- headers = headers or {}
-
- # Make sure session is logged in
- if not self.session.cookies.get("csrftoken"):
- self._login()
-
- url = self._construct_studio_url(path)
-
- headers.update(
- {
- "X-CSRFToken": self.session.cookies.get("csrftoken"),
- }
- )
-
- headers.update({"Content-Type": contenttype})
- headers.update({"X-Studio-Internal-Prober": self.prober_name})
- response = self.session.request(action, url, data=data, headers=headers)
- response.raise_for_status()
-
- return response
-
- def run(self):
-
- if self.develop_only and PRODUCTION_MODE_ON:
- return
-
- start_time = datetime.datetime.now()
-
- self.do_probe()
-
- end_time = datetime.datetime.now()
- elapsed = (end_time - start_time).total_seconds() * 1000
-
- print( # noqa: T201
- "{metric_name} {latency_ms}".format(
- metric_name=self.metric, latency_ms=elapsed
- )
- )
-
-
-class ProberException(Exception):
- pass
diff --git a/deploy/probers/channel_creation_probe.py b/deploy/probers/channel_creation_probe.py
deleted file mode 100755
index b7ab8d4254..0000000000
--- a/deploy/probers/channel_creation_probe.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-import json
-
-from base import BaseProbe
-
-
-class ChannelCreationProbe(BaseProbe):
-
- metric = "channel_creation_latency_msec"
- develop_only = True
- prober_name = "CHANNEL-CREATION-PROBER"
-
- def _get_user_id(self):
- response = self.request("api/internal/authenticate_user_internal")
- return json.loads(response.content)["user_id"]
-
- def do_probe(self):
- payload = {
- "description": "description",
- "language": "en-PT",
- "name": "test",
- "thumbnail": "b3897c3d96bde7f1cff77ce368924098.png",
- "content_defaults": "{}",
- "editors": [self._get_user_id()],
- }
- self.request(
- "api/channel",
- action="POST",
- data=payload,
- contenttype="application/x-www-form-urlencoded",
- )
-
-
-if __name__ == "__main__":
- ChannelCreationProbe().run()
diff --git a/deploy/probers/channel_edit_page_probe.py b/deploy/probers/channel_edit_page_probe.py
deleted file mode 100755
index 2b3b80d2a3..0000000000
--- a/deploy/probers/channel_edit_page_probe.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-import json
-
-from base import BaseProbe
-
-
-class ChannelEditPageProbe(BaseProbe):
-
- metric = "channel_edit_page_latency_msec"
- prober_name = "CHANNEL-EDIT-PAGE-PROBER"
-
- def _get_channel(self):
- response = self.request("api/probers/get_prober_channel")
- return json.loads(response.content)
-
- def do_probe(self):
- channel = self._get_channel()
- path = "channels/{}/edit".format(channel["id"])
- self.request(path)
-
-
-if __name__ == "__main__":
- ChannelEditPageProbe().run()
diff --git a/deploy/probers/channel_update_probe.py b/deploy/probers/channel_update_probe.py
deleted file mode 100755
index 1951df9348..0000000000
--- a/deploy/probers/channel_update_probe.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-import json
-
-from base import BaseProbe
-
-
-class ChannelUpdateProbe(BaseProbe):
-
- metric = "channel_update_latency_msec"
- prober_name = "CHANNEL-UPDATE-PROBER"
- develop_only = True
-
- def _get_channel(self):
- response = self.request("api/probers/get_prober_channel")
- return json.loads(response.content)
-
- def do_probe(self):
- channel = self._get_channel()
- payload = {"name": "New Test Name", "id": channel["id"]}
- path = "api/channel/{}".format(channel["id"])
- self.request(
- path,
- action="PATCH",
- data=payload,
- contenttype="application/x-www-form-urlencoded",
- )
-
-
-if __name__ == "__main__":
- ChannelUpdateProbe().run()
diff --git a/deploy/probers/login_page_probe.py b/deploy/probers/login_page_probe.py
deleted file mode 100755
index 42ed9a43e3..0000000000
--- a/deploy/probers/login_page_probe.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-from base import BaseProbe
-
-
-class LoginProbe(BaseProbe):
-
- metric = "login_latency_msec"
-
- def do_probe(self):
- self._login()
-
-
-if __name__ == "__main__":
- LoginProbe().run()
diff --git a/deploy/probers/postgres_probe.py b/deploy/probers/postgres_probe.py
deleted file mode 100755
index 3aa29acc0c..0000000000
--- a/deploy/probers/postgres_probe.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-import os
-
-import psycopg2
-from base import BaseProbe
-
-
-# Use dev options if no env set
-DB_HOST = os.getenv("DATA_DB_HOST") or "localhost"
-DB_PORT = 5432
-DB_NAME = os.getenv("DATA_DB_NAME") or "kolibri-studio"
-DB_USER = os.getenv("DATA_DB_USER") or "learningequality"
-DB_PASSWORD = os.getenv("DATA_DB_PASS") or "kolibri"
-TIMEOUT_SECONDS = 2
-
-
-class PostgresProbe(BaseProbe):
- metric = "postgres_latency_msec"
-
- def do_probe(self):
- conn = psycopg2.connect(
- host=DB_HOST,
- port=DB_PORT,
- dbname=DB_NAME,
- user=DB_USER,
- password=DB_PASSWORD,
- connect_timeout=TIMEOUT_SECONDS,
- )
- cur = conn.cursor()
- cur.execute("SELECT datname FROM pg_database;")
- cur.fetchone() # raises exception if cur.execute() produced no results
- conn.close()
-
-
-if __name__ == "__main__":
- PostgresProbe().run()
diff --git a/deploy/probers/postgres_read_contentnode_probe.py b/deploy/probers/postgres_read_contentnode_probe.py
deleted file mode 100755
index fa4767f404..0000000000
--- a/deploy/probers/postgres_read_contentnode_probe.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-import os
-
-import psycopg2
-from base import BaseProbe
-
-
-# Use dev options if no env set
-DB_HOST = os.getenv("DATA_DB_HOST") or "localhost"
-DB_PORT = 5432
-DB_NAME = os.getenv("DATA_DB_NAME") or "kolibri-studio"
-DB_USER = os.getenv("DATA_DB_USER") or "learningequality"
-DB_PASSWORD = os.getenv("DATA_DB_PASS") or "kolibri"
-TIMEOUT_SECONDS = 2
-
-
-class PostgresReadContentnodeProbe(BaseProbe):
- metric = "postgres_read_contentnode_latency_msec"
-
- def do_probe(self):
- conn = psycopg2.connect(
- host=DB_HOST,
- port=DB_PORT,
- dbname=DB_NAME,
- user=DB_USER,
- password=DB_PASSWORD,
- connect_timeout=TIMEOUT_SECONDS,
- )
- cur = conn.cursor()
- cur.execute("SELECT * FROM contentcuration_contentnode LIMIT 1;")
- num = cur.fetchone()
- conn.close()
- if not num:
- raise Exception("Reading a ContentNode in PostgreSQL database failed.")
-
-
-if __name__ == "__main__":
- PostgresReadContentnodeProbe().run()
diff --git a/deploy/probers/postgres_write_contentnode_probe.py b/deploy/probers/postgres_write_contentnode_probe.py
deleted file mode 100755
index 7785116fe4..0000000000
--- a/deploy/probers/postgres_write_contentnode_probe.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-import os
-from datetime import datetime
-
-import psycopg2
-from base import BaseProbe
-
-# Use dev options if no env set
-DB_HOST = os.getenv("DATA_DB_HOST") or "localhost"
-DB_PORT = 5432
-DB_NAME = os.getenv("DATA_DB_NAME") or "kolibri-studio"
-DB_USER = os.getenv("DATA_DB_USER") or "learningequality"
-DB_PASSWORD = os.getenv("DATA_DB_PASS") or "kolibri"
-TIMEOUT_SECONDS = 2
-
-
-class PostgresWriteContentnodeProbe(BaseProbe):
- metric = "postgres_write_contentnode_latency_msec"
-
- develop_only = True
-
- def do_probe(self):
- conn = psycopg2.connect(
- host=DB_HOST,
- port=DB_PORT,
- dbname=DB_NAME,
- user=DB_USER,
- password=DB_PASSWORD,
- connect_timeout=TIMEOUT_SECONDS,
- )
- cur = conn.cursor()
- now = datetime.now()
- cur.execute(
- """
- INSERT INTO contentcuration_contentnode(id, content_id, kind_id, title, description,sort_order, created,
- modified, changed, lft, rght, tree_id, level, published, node_id, freeze_authoring_data, publishing, role_visibility)
- VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
- """,
- (
- "testpostgreswriteprobe",
- "testprobecontentid",
- "topic",
- "test postgres write contentnode probe",
- "test postgres write contentnode probe",
- 1,
- now,
- now,
- True,
- 1,
- 1,
- 1,
- 1,
- False,
- "testprobenodeid",
- False,
- False,
- "test",
- ),
- )
- conn.close()
-
-
-if __name__ == "__main__":
- PostgresWriteContentnodeProbe().run()
diff --git a/deploy/probers/postmark_api_probe.py b/deploy/probers/postmark_api_probe.py
deleted file mode 100755
index 30cbb1741c..0000000000
--- a/deploy/probers/postmark_api_probe.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-import requests
-from base import BaseProbe
-
-POSTMARK_SERVICE_STATUS_URL = "https://status.postmarkapp.com/api/1.0/services"
-
-# (See here for API details: https://status.postmarkapp.com/api)
-ALL_POSSIBLE_STATUSES = ["UP", "MAINTENANCE", "DELAY", "DEGRADED", "DOWN"]
-
-PASSING_POSTMARK_STATUSES = {
- "/services/smtp": ["UP", "MAINTENANCE"],
- "/services/api": ALL_POSSIBLE_STATUSES,
- "/services/inbound": ALL_POSSIBLE_STATUSES,
- "/services/web": ALL_POSSIBLE_STATUSES,
-}
-
-
-class PostmarkProbe(BaseProbe):
- metric = "postmark_api_latency_msec"
-
- def do_probe(self):
- r = requests.get(url=POSTMARK_SERVICE_STATUS_URL)
- for service in r.json():
- allowed_statuses = PASSING_POSTMARK_STATUSES.get(service["url"])
- passing = service["status"] in allowed_statuses
-
- if passing:
- continue
- raise Exception(
- "Postmark's `%s` service has status %s, but we require one of the following: %s"
- % (service["name"], service["status"], allowed_statuses)
- )
-
-
-if __name__ == "__main__":
- PostmarkProbe().run()
diff --git a/deploy/probers/publishing_status_probe.py b/deploy/probers/publishing_status_probe.py
deleted file mode 100755
index fffe67eb92..0000000000
--- a/deploy/probers/publishing_status_probe.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-import datetime
-import os
-
-from base import BaseProbe
-from base import ProberException
-from base import PRODUCTION_MODE_ON
-
-
-ALERT_THRESHOLD = int(
- os.getenv("PROBER_PUBLISHING_ALERT_THRESHOLD") or 2 * 3600
-) # default = 2 hours
-DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
-
-
-class PublishingStatusProbe(BaseProbe):
-
- metric = "max_publishing_duration_sec"
- prober_name = "PUBLISHING_STATUS_PROBER"
-
- def run(self):
- if self.develop_only and PRODUCTION_MODE_ON:
- return
-
- r = self.request("api/probers/publishing_status/")
- results = r.json()
- now = datetime.datetime.now()
- max_duration = 0
- channel_ids = []
-
- for result in results:
- duration = (
- now - datetime.datetime.strptime(result["performed"], DATE_FORMAT)
- ).seconds
- max_duration = max(max_duration, duration)
- if duration >= ALERT_THRESHOLD or not result["task_id"]:
- channel_ids.append(result["channel_id"])
-
- if max_duration > 0:
- print( # noqa: T201
- "{metric_name} {duration_sec}".format(
- metric_name=self.metric, duration_sec=max_duration
- )
- )
-
- if channel_ids:
- raise ProberException(
- "Publishing alert for channels: {}".format(", ".join(channel_ids))
- )
-
-
-if __name__ == "__main__":
- PublishingStatusProbe().run()
diff --git a/deploy/probers/task_queue_probe.py b/deploy/probers/task_queue_probe.py
deleted file mode 100755
index 6148176856..0000000000
--- a/deploy/probers/task_queue_probe.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-from base import BaseProbe
-
-
-class TaskQueueProbe(BaseProbe):
-
- metric = "task_queue_ping_latency_msec"
- threshold = 50
-
- def do_probe(self):
- r = self.request("api/probers/task_queue_status/")
- r.raise_for_status()
- results = r.json()
-
- task_count = results.get("queued_task_count", 0)
- if task_count >= self.threshold:
- raise Exception(
- "Task queue length is over threshold! {} > {}".format(
- task_count, self.threshold
- )
- )
-
-
-if __name__ == "__main__":
- TaskQueueProbe().run()
diff --git a/deploy/probers/topic_creation_probe.py b/deploy/probers/topic_creation_probe.py
deleted file mode 100755
index 6c7090c598..0000000000
--- a/deploy/probers/topic_creation_probe.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-import json
-
-from base import BaseProbe
-from le_utils.constants import content_kinds
-
-
-class TopicCreationProbe(BaseProbe):
-
- metric = "topic_creation_latency_msec"
- develop_only = True
- prober_name = "TOPIC-CREATION-PROBER"
-
- def _get_channel(self):
- response = self.request("api/probers/get_prober_channel")
- return json.loads(response.content)
-
- def do_probe(self):
- channel = self._get_channel()
- payload = {
- "title": "Statistics and Probeability",
- "kind": content_kinds.TOPIC,
- }
- response = self.request(
- "api/contentnode", action="POST", data=json.dumps(payload)
- )
-
- # Test saving to channel works
- new_topic = json.loads(response.content)
- new_topic.update({"parent": channel["main_tree"]})
- path = "api/contentnode/{}".format(new_topic["id"])
- self.request(
- path,
- action="PUT",
- data=payload,
- contenttype="application/x-www-form-urlencoded",
- )
-
-
-if __name__ == "__main__":
- TopicCreationProbe().run()
diff --git a/deploy/probers/unapplied_changes_probe.py b/deploy/probers/unapplied_changes_probe.py
deleted file mode 100755
index 6065f3df28..0000000000
--- a/deploy/probers/unapplied_changes_probe.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-from base import BaseProbe
-
-
-class UnappliedChangesProbe(BaseProbe):
-
- metric = "unapplied__changes_ping_latency_msec"
-
- def do_probe(self):
- r = self.request("api/probers/unapplied_changes_status/")
- r.raise_for_status()
- results = r.json()
-
- active_task_count = results.get("active_task_count", 0)
- unapplied_changes_count = results.get("unapplied_changes_count", 0)
-
- if active_task_count == 0 and unapplied_changes_count > 0:
- raise Exception(
- "There are unapplied changes and no active tasks! {} unapplied changes".format(
- unapplied_changes_count
- )
- )
-
-
-if __name__ == "__main__":
- UnappliedChangesProbe().run()
diff --git a/deploy/probers/worker_probe.py b/deploy/probers/worker_probe.py
deleted file mode 100755
index 211dc2e6a1..0000000000
--- a/deploy/probers/worker_probe.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-from base import BaseProbe
-
-
-class WorkerProbe(BaseProbe):
-
- metric = "worker_ping_latency_msec"
-
- def do_probe(self):
- r = self.request("api/probers/celery_worker_status/")
- r.raise_for_status()
- results = r.json()
-
- active_workers = []
- for worker_hostname, worker_status in results.items():
- if "ok" in worker_status.keys():
- active_workers.append(worker_hostname)
-
- if not active_workers:
- raise Exception("No workers are running!")
-
-
-if __name__ == "__main__":
- WorkerProbe().run()
diff --git a/docker-compose.yml b/docker-compose.yml
index 3a07894c8d..719fc797ed 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,6 +14,7 @@ x-studio-environment:
CELERY_BROKER_ENDPOINT: redis
CELERY_RESULT_BACKEND_ENDPOINT: redis
CELERY_REDIS_PASSWORD: ""
+ REMAP_SIGTERM: "SIGQUIT"
PROBER_STUDIO_BASE_URL: http://studio-app:8080/{path}
x-studio-worker:
@@ -35,7 +36,7 @@ services:
platform: linux/amd64
build:
context: .
- dockerfile: k8s/images/nginx/Dockerfile
+ dockerfile: docker/Dockerfile.nginx.prod
ports:
- "8081:8080"
depends_on:
@@ -44,7 +45,7 @@ services:
studio-app:
<<: *studio-worker
- entrypoint: python docker/entrypoint.py
+ entrypoint: python docker/studio-dev/entrypoint.py
command: pnpm run devserver
ports:
- "8080:8080"
@@ -83,17 +84,6 @@ services:
redis:
image: redis:6.0.9
- cloudprober:
- <<: *studio-worker
- working_dir: /src/deploy
- entrypoint: ""
- # sleep 30 seconds allowing some time for the studio app to start up
- command: '/bin/bash -c "sleep 30 && /bin/cloudprober --config_file ./cloudprober.cfg"'
- # wait until the main app and celery worker have started
- depends_on:
- - studio-app
- - celery-worker
-
volumes:
minio:
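With the cloudprober service removed from the compose file, a plain invocation now brings up the whole dev stack. A minimal sketch, assuming Docker Compose v2 is available locally:

    docker compose build
    docker compose up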
diff --git a/docker/Dockerfile.demo b/docker/Dockerfile.demo
deleted file mode 100644
index 2ae03758b6..0000000000
--- a/docker/Dockerfile.demo
+++ /dev/null
@@ -1,43 +0,0 @@
-FROM python:3.10-slim-bookworm
-
-# Set the timezone
-RUN ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
-
-ENV DEBIAN_FRONTEND noninteractive
-# Default Python file.open file encoding to UTF-8 instead of ASCII, workaround for le-utils setup.py issue
-ENV LANG C.UTF-8
-RUN apt-get update && apt-get -y install python3-pip python3-dev gcc libpq-dev make git curl libjpeg-dev libssl-dev libffi-dev ffmpeg
-
-# Pin, Download and install node 18.x
-RUN apt-get update \
- && apt-get install -y ca-certificates curl gnupg \
- && mkdir -p /etc/apt/keyrings \
- && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
- && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \
- && echo "Package: nodejs" >> /etc/apt/preferences.d/preferences \
- && echo "Pin: origin deb.nodesource.com" >> /etc/apt/preferences.d/preferences \
- && echo "Pin-Priority: 1001" >> /etc/apt/preferences.d/preferences\
- && apt-get update \
- && apt-get install -y nodejs
-
-RUN corepack enable pnpm
-COPY ./package.json .
-COPY ./pnpm-lock.yaml .
-RUN pnpm install
-
-COPY requirements.txt .
-
-RUN pip install --upgrade pip
-RUN pip install --ignore-installed -r requirements.txt
-
-COPY . /contentcuration/
-WORKDIR /contentcuration
-
-# generate the node bundles
-RUN mkdir -p contentcuration/static/js/bundles
-RUN ln -s /node_modules /contentcuration/node_modules
-RUN pnpm run build
-
-EXPOSE 8000
-
-ENTRYPOINT ["make", "altprodserver"]
diff --git a/docker/Dockerfile.nginx.prod b/docker/Dockerfile.nginx.prod
new file mode 100644
index 0000000000..ba33210a28
--- /dev/null
+++ b/docker/Dockerfile.nginx.prod
@@ -0,0 +1,15 @@
+FROM nginx:1.25
+
+# Override this to build with the docker/nginx directory itself as the context.
+ARG SRC_DIR=docker/nginx
+
+RUN rm /etc/nginx/conf.d/* # if there's stuff here, nginx won't read sites-enabled
+COPY ${SRC_DIR}/nginx.conf /etc/nginx/nginx.conf
+COPY ${SRC_DIR}/includes /etc/nginx/includes
+COPY ${SRC_DIR}/entrypoint.sh /usr/bin
+
+# Really seems like it _should_ be here, as it's referenced by `nginx.conf`.
+# But it hasn't been for years.
+# COPY ${SRC_DIR}/mime.types /etc/nginx/mime.types
+
+CMD ["entrypoint.sh"]
diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod
deleted file mode 120000
index 11036b6d36..0000000000
--- a/docker/Dockerfile.prod
+++ /dev/null
@@ -1 +0,0 @@
-../k8s/images/app/Dockerfile
\ No newline at end of file
diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod
new file mode 100644
index 0000000000..a9477f90ed
--- /dev/null
+++ b/docker/Dockerfile.prod
@@ -0,0 +1,47 @@
+FROM python:3.10-slim-bookworm
+
+# Set the timezone
+RUN ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
+
+ENV DEBIAN_FRONTEND noninteractive
+# Default Python file.open file encoding to UTF-8 instead of ASCII, workaround for le-utils setup.py issue
+ENV LANG C.UTF-8
+RUN apt-get update && apt-get -y install python3-pip python3-dev gcc libpq-dev libssl-dev libffi-dev make git curl libjpeg-dev ffmpeg
+
+# Pin, download and install Node.js 20.x
+RUN apt-get update \
+ && apt-get install -y ca-certificates curl gnupg \
+ && mkdir -p /etc/apt/keyrings \
+ && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
+ && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \
+ && echo "Package: nodejs" >> /etc/apt/preferences.d/preferences \
+ && echo "Pin: origin deb.nodesource.com" >> /etc/apt/preferences.d/preferences \
+ && echo "Pin-Priority: 1001" >> /etc/apt/preferences.d/preferences\
+ && apt-get update \
+ && apt-get install -y nodejs
+
+RUN corepack enable pnpm
+
+COPY ./package.json .
+COPY ./pnpm-lock.yaml .
+RUN pnpm install
+
+COPY requirements.txt .
+
+RUN pip install --upgrade pip
+RUN pip install --ignore-installed -r requirements.txt
+
+COPY . /contentcuration/
+WORKDIR /contentcuration
+
+# generate the node bundles
+RUN mkdir -p contentcuration/static/js/bundles
+RUN ln -s /node_modules /contentcuration/node_modules
+RUN pnpm run build
+
+ARG COMMIT_SHA
+ENV RELEASE_COMMIT_SHA=$COMMIT_SHA
+
+EXPOSE 8000
+
+ENTRYPOINT ["make", "altprodserver"]
diff --git a/k8s/images/nginx/entrypoint.sh b/docker/nginx/entrypoint.sh
similarity index 100%
rename from k8s/images/nginx/entrypoint.sh
rename to docker/nginx/entrypoint.sh
diff --git a/deploy/includes/README.md b/docker/nginx/includes/README.md
similarity index 100%
rename from deploy/includes/README.md
rename to docker/nginx/includes/README.md
diff --git a/deploy/includes/content/_proxy.conf b/docker/nginx/includes/content/_proxy.conf
similarity index 100%
rename from deploy/includes/content/_proxy.conf
rename to docker/nginx/includes/content/_proxy.conf
diff --git a/deploy/includes/content/default.conf b/docker/nginx/includes/content/default.conf
similarity index 95%
rename from deploy/includes/content/default.conf
rename to docker/nginx/includes/content/default.conf
index 404bd64075..c2c95df613 100644
--- a/deploy/includes/content/default.conf
+++ b/docker/nginx/includes/content/default.conf
@@ -1,4 +1,4 @@
-# DO NOT RENAME: referenced by k8s/images/nginx/entrypoint.sh
+# DO NOT RENAME: referenced by docker/nginx/entrypoint.sh
# assume development
location @emulator {
diff --git a/deploy/includes/content/develop-studio-content.conf b/docker/nginx/includes/content/develop-studio-content.conf
similarity index 100%
rename from deploy/includes/content/develop-studio-content.conf
rename to docker/nginx/includes/content/develop-studio-content.conf
diff --git a/deploy/includes/content/studio-content.conf b/docker/nginx/includes/content/studio-content.conf
similarity index 100%
rename from deploy/includes/content/studio-content.conf
rename to docker/nginx/includes/content/studio-content.conf
diff --git a/deploy/mime.types b/docker/nginx/mime.types
similarity index 100%
rename from deploy/mime.types
rename to docker/nginx/mime.types
diff --git a/deploy/nginx.conf b/docker/nginx/nginx.conf
similarity index 100%
rename from deploy/nginx.conf
rename to docker/nginx/nginx.conf
diff --git a/docker/entrypoint.py b/docker/studio-dev/entrypoint.py
similarity index 100%
rename from docker/entrypoint.py
rename to docker/studio-dev/entrypoint.py
diff --git a/k8s/Chart.lock b/k8s/Chart.lock
deleted file mode 100644
index 3710092dd1..0000000000
--- a/k8s/Chart.lock
+++ /dev/null
@@ -1,9 +0,0 @@
-dependencies:
-- name: cloudsql-proxy
- repository: https://storage.googleapis.com/t3n-helm-charts
- version: 2.0.0
-- name: redis
- repository: https://charts.bitnami.com/bitnami
- version: 12.1.1
-digest: sha256:8e1cf67168047aa098bae2eca9ddae32bb68ba68ca80668492760037fe8ce4ef
-generated: "2020-11-25T04:47:31.298097908-08:00"
diff --git a/k8s/Chart.yaml b/k8s/Chart.yaml
deleted file mode 100644
index 0395068cfc..0000000000
--- a/k8s/Chart.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v2
-description: Kolibri Studio, the Content Curation tool for Kolibri!
-name: studio
-version: 0.3.0
-dependencies:
- - name: cloudsql-proxy
- version: 2.0.0
- repository: https://storage.googleapis.com/t3n-helm-charts
- enabled: true
- - name: redis
- version: 12.1.1
- repository: https://charts.bitnami.com/bitnami
- enabled: true
diff --git a/k8s/Makefile b/k8s/Makefile
deleted file mode 100644
index c81ba867d9..0000000000
--- a/k8s/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-DEPLOYMENT := `kubectl get deploy -l app=master-studio -o custom-columns=NAME:.metadata.name --no-headers`
-POD := `kubectl get pods -o=custom-columns=NAME:.metadata.name --field-selector=status.phase=Running --no-headers -l app=master-studio | head -n 1`
-
-master-shell:
- kubectl rollout status deployment/$(DEPLOYMENT)
- echo Running bash inside $(POD)
- kubectl exec -it $(POD) bash
diff --git a/k8s/charts/cloudsql-proxy-2.0.0.tgz b/k8s/charts/cloudsql-proxy-2.0.0.tgz
deleted file mode 100644
index 29a9046b19..0000000000
Binary files a/k8s/charts/cloudsql-proxy-2.0.0.tgz and /dev/null differ
diff --git a/k8s/charts/redis-12.1.1.tgz b/k8s/charts/redis-12.1.1.tgz
deleted file mode 100644
index b6735330f3..0000000000
Binary files a/k8s/charts/redis-12.1.1.tgz and /dev/null differ
diff --git a/k8s/create-cloudsql-database.sh b/k8s/create-cloudsql-database.sh
deleted file mode 100755
index f022586047..0000000000
--- a/k8s/create-cloudsql-database.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-set -e
-
-DBNAME=$1
-
-INSTANCE=$2
-
-DATABASES=`gcloud sql databases list --instance=${INSTANCE} | awk '{print $1}' | tail -n +2`
-
-EXISTENCE=False
-
-for word in ${DATABASES}; do
- if [[ ${word} = ${DBNAME} ]];
- then
- echo "Database ${DBNAME} exists in SQL instance ${INSTANCE}."
- EXISTENCE=True
- break
- fi
-done
-
-
-if [[ ${EXISTENCE} = False ]];
-then
- echo "Creating database ${DBNAME} in SQL instance ${INSTANCE}."
- gcloud sql databases create ${DBNAME} --instance=${INSTANCE}
-fi
diff --git a/k8s/create-cloudsql-proxy.sh b/k8s/create-cloudsql-proxy.sh
deleted file mode 100755
index e9adfd0c8b..0000000000
--- a/k8s/create-cloudsql-proxy.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env sh
-# Arguments:
-# $1: The Helm release name. You'll see this name inside Kubernetes.
-# $2: the path to the service account JSON file that has access to Cloud SQL.
-# $3: Cloud SQL instance name
-# $4: GCP project id
-# $5: Cloud SQL region
-
-set -xe
-
-# Install the helm server side component
-helm init
-
-# Install the Global cluster sql proxy. Create one for each
-# Cloud SQL database you want to connect to.
-helm upgrade $1 stable/gcloud-sqlproxy --namespace sqlproxy \
- --set serviceAccountKey="$(cat $2 | base64)" \
- --set cloudsql.instances[0].instance=$3 \
- --set cloudsql.instances[0].project=$4 \
- --set cloudsql.instances[0].region=$5 \
- --set cloudsql.instances[0].port=5432 -i
diff --git a/k8s/create-multiplexing-reverse-proxy-lb.sh b/k8s/create-multiplexing-reverse-proxy-lb.sh
deleted file mode 100755
index 22f6ede359..0000000000
--- a/k8s/create-multiplexing-reverse-proxy-lb.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env sh
-# This script creates a Traefik load balancer, a single
-# set of servers that we'll use to multiplex between different
-# versions of studio.
-#
-# We want to use this vs. Google Cloud native load balancers, b/c
-# each gcloud lb is $20/month. Using an internal traefik load balancer
-# means we only need a static IP, and traefik does the load balancing.
-
-# Arguments:
-# $1: The Helm release name. You'll see this name inside Kubernetes.
-# $2: the load balancer's external IP (xxx.xxx.xxx.xxx). Make sure to reserve this first on GCP.
-# $3: The Cloudflare email address. Used to perform automated letsencrypt verification.
-# $4: The API key for the given Cloudflare email.
-
-
-set -xe
-
-helm init
-
-helm upgrade $1 stable/traefik --namespace kube-system \
- --set loadBalancerIP=$2 \
- --set gzip.enabled=false \
- --set acme.enabled=true \
- --set ssl.enabled=true \
- --set acme.challengeType=dns-01 \
- --set acme.dnsProvider.name=cloudflare \
- --set acme.dnsProvider.cloudflare.CLOUDFLARE_EMAIL=$3 \
- --set acme.dnsProvider.cloudflare.CLOUDFLARE_API_KEY=$4 \
- --set acme.email='admins@learningequality.org' \
- --set cpuRequest=1000m \
- --set memoryRequest=1Gi \
- --set cpuLimit=2000m \
- --set memoryLimit=2Gi \
- --set acme.staging=false \
- --set dashboard.enabled=true \
- --set dashboard.domain=traefik-lb-ui.cd.learningequality.org \
- -i
diff --git a/k8s/create-postgres-user-and-db.exp b/k8s/create-postgres-user-and-db.exp
deleted file mode 100755
index af22daf2d2..0000000000
--- a/k8s/create-postgres-user-and-db.exp
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/local/bin/expect -f
-#
-# This Expect script was generated by autoexpect on Tue Jun 26 17:27:40 2018
-# Expect and autoexpect were both written by Don Libes, NIST.
-#
-# Note that autoexpect does not guarantee a working script. It
-# necessarily has to guess about certain things. Two reasons a script
-# might fail are:
-#
-# 1) timing - A surprising number of programs (rn, ksh, zsh, telnet,
-# etc.) and devices discard or ignore keystrokes that arrive "too
-# quickly" after prompts. If you find your new script hanging up at
-# one spot, try adding a short sleep just before the previous send.
-# Setting "force_conservative" to 1 (see below) makes Expect do this
-# automatically - pausing briefly before sending each character. This
-# pacifies every program I know of. The -c flag makes the script do
-# this in the first place. The -C flag allows you to define a
-# character to toggle this mode off and on.
-
-set force_conservative 0 ;# set to 1 to force conservative mode even if
- ;# script wasn't run conservatively originally
-if {$force_conservative} {
- set send_slow {1 .1}
- proc send {ignore arg} {
- sleep .1
- exp_send -s -- $arg
- }
-}
-
-#
-# 2) differing output - Some programs produce different output each time
-# they run. The "date" command is an obvious example. Another is
-# ftp, if it produces throughput statistics at the end of a file
-# transfer. If this causes a problem, delete these patterns or replace
-# them with wildcards. An alternative is to use the -p flag (for
-# "prompt") which makes Expect only look for the last line of output
-# (i.e., the prompt). The -P flag allows you to define a character to
-# toggle this mode off and on.
-#
-# Read the man page for more info.
-#
-# -Don
-
-# Aron: call this script with the following arguments
-# $1: the name of both the user and the database they control
-# $2: the postgres user's password. Needed to create the user.
-set user [lindex $argv 0]
-set db [lindex $argv 0]
-set postgres_password [lindex $argv 1]
-
-
-set timeout -1
-spawn gcloud sql connect studio-qa --project ops-central
-match_max 100000
-expect -exact "Password for user postgres: "
-send -- "$postgres_password\n"
-expect -exact "postgres=> "
-send -- "CREATE DATABASE $db; CREATE USER $user WITH ENCRYPTED PASSWORD '$user'; GRANT ALL PRIVILEGES ON DATABASE $db TO $user;"
-send -- "\n"
-expect -exact "postgres=>"
-send -- ""
-expect eof
diff --git a/k8s/encrypt-env-var.sh b/k8s/encrypt-env-var.sh
deleted file mode 100755
index c20e0774b0..0000000000
--- a/k8s/encrypt-env-var.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# How to use:
-# pipe a secret string into this script.
-# This will output instructions on what you
-# then need to add into your cloudbuild.yaml file.
-
-KEYRING=$1
-KEY=$2
-
-gcloud kms encrypt \
- --plaintext-file=- \
- --ciphertext-file=- \
- --location=global \
- --keyring=$KEYRING \
- --key=$KEY \
- | base64
diff --git a/k8s/helm-deploy.sh b/k8s/helm-deploy.sh
deleted file mode 100755
index 4f512559fd..0000000000
--- a/k8s/helm-deploy.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-
-set -euo pipefail
-
-RELEASE_NAME=$1
-STUDIO_APP_IMAGE_NAME=$2
-STUDIO_NGINX_IMAGE_NAME=$3
-STUDIO_BUCKET_NAME=$4
-COMMIT_SHA=$5
-PROJECT_ID=$6
-DATABASE_INSTANCE_NAME=$7
-DATABASE_REGION=$8
-
-K8S_DIR=$(dirname $0)
-
-function get_secret {
- gcloud secrets versions access --secret=$1 latest
-}
-
-helm upgrade --install \
- --namespace $RELEASE_NAME --create-namespace \
- --set studioApp.postmarkApiKey=$(get_secret postmark-api-key) \
- --set studioApp.releaseCommit=$COMMIT_SHA \
- --set studioApp.imageName=$STUDIO_APP_IMAGE_NAME \
- --set studioNginx.imageName=$STUDIO_NGINX_IMAGE_NAME \
- --set studioApp.gcs.bucketName=$STUDIO_BUCKET_NAME \
- --set studioApp.gcs.writerServiceAccountKeyBase64Encoded=$(get_secret studio-gcs-service-account-key | base64 -w 0) \
- --set settings=contentcuration.production_settings \
- --set sentry.dsnKey=$(get_secret sentry-dsn-key) \
- --set redis.password=$(get_secret redis-password) \
- --set cloudsql-proxy.credentials.username=$(get_secret postgres-username) \
- --set cloudsql-proxy.credentials.password=$(get_secret postgres-password) \
- --set cloudsql-proxy.credentials.dbname=$(get_secret postgres-dbname) \
- --set cloudsql-proxy.cloudsql.instances[0].instance=$DATABASE_INSTANCE_NAME \
- --set cloudsql-proxy.cloudsql.instances[0].project=$PROJECT_ID \
- --set cloudsql-proxy.cloudsql.instances[0].region=$DATABASE_REGION \
- --set cloudsql-proxy.cloudsql.instances[0].port=5432 \
- $RELEASE_NAME $K8S_DIR
diff --git a/k8s/images/app/Dockerfile b/k8s/images/app/Dockerfile
deleted file mode 100644
index a9477f90ed..0000000000
--- a/k8s/images/app/Dockerfile
+++ /dev/null
@@ -1,47 +0,0 @@
-FROM python:3.10-slim-bookworm
-
-# Set the timezone
-RUN ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
-
-ENV DEBIAN_FRONTEND noninteractive
-# Default Python file.open file encoding to UTF-8 instead of ASCII, workaround for le-utils setup.py issue
-ENV LANG C.UTF-8
-RUN apt-get update && apt-get -y install python3-pip python3-dev gcc libpq-dev libssl-dev libffi-dev make git curl libjpeg-dev ffmpeg
-
-# Pin, Download and install node 18.x
-RUN apt-get update \
- && apt-get install -y ca-certificates curl gnupg \
- && mkdir -p /etc/apt/keyrings \
- && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
- && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \
- && echo "Package: nodejs" >> /etc/apt/preferences.d/preferences \
- && echo "Pin: origin deb.nodesource.com" >> /etc/apt/preferences.d/preferences \
- && echo "Pin-Priority: 1001" >> /etc/apt/preferences.d/preferences\
- && apt-get update \
- && apt-get install -y nodejs
-
-RUN corepack enable pnpm
-
-COPY ./package.json .
-COPY ./pnpm-lock.yaml .
-RUN pnpm install
-
-COPY requirements.txt .
-
-RUN pip install --upgrade pip
-RUN pip install --ignore-installed -r requirements.txt
-
-COPY . /contentcuration/
-WORKDIR /contentcuration
-
-# generate the node bundles
-RUN mkdir -p contentcuration/static/js/bundles
-RUN ln -s /node_modules /contentcuration/node_modules
-RUN pnpm run build
-
-ARG COMMIT_SHA
-ENV RELEASE_COMMIT_SHA=$COMMIT_SHA
-
-EXPOSE 8000
-
-ENTRYPOINT ["make", "altprodserver"]
diff --git a/k8s/images/app/Dockerfile b/k8s/images/app/Dockerfile
new file mode 120000
index 0000000000..4750df1fc3
--- /dev/null
+++ b/k8s/images/app/Dockerfile
@@ -0,0 +1 @@
+docker/Dockerfile.prod
\ No newline at end of file
diff --git a/k8s/images/app/Makefile b/k8s/images/app/Makefile
deleted file mode 100644
index d958cc266a..0000000000
--- a/k8s/images/app/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-COMMIT := nlatest
-
-imagebuild:
- docker build ../../../ -f $$PWD/Dockerfile -t gcr.io/github-learningequality-studio/app:$(COMMIT)
diff --git a/k8s/images/nginx/Dockerfile b/k8s/images/nginx/Dockerfile
deleted file mode 100644
index ab38a1118a..0000000000
--- a/k8s/images/nginx/Dockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-FROM nginx:1.25
-
-RUN rm /etc/nginx/conf.d/* # if there's stuff here, nginx won't read sites-enabled
-COPY deploy/nginx.conf /etc/nginx/nginx.conf
-COPY deploy/includes /etc/nginx/includes
-COPY k8s/images/nginx/entrypoint.sh /usr/bin
-
-CMD ["entrypoint.sh"]
diff --git a/k8s/images/nginx/Dockerfile b/k8s/images/nginx/Dockerfile
new file mode 120000
index 0000000000..a4867c19b0
--- /dev/null
+++ b/k8s/images/nginx/Dockerfile
@@ -0,0 +1 @@
+docker/Dockerfile.nginx.prod
\ No newline at end of file
diff --git a/k8s/images/nginx/Makefile b/k8s/images/nginx/Makefile
deleted file mode 100644
index 9c2d01dc60..0000000000
--- a/k8s/images/nginx/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-CONTAINER_NAME := "contentworkshop-app-nginx-proxy"
-CONTAINER_VERSION := v4
-GCLOUD_PROJECT := contentworkshop-159920
-GIT_PROJECT_ROOT := `git rev-parse --show-toplevel`
-
-all: appcodeupdate imagebuild imagepush
-
-imagebuild:
- docker build -t learningequality/$(CONTAINER_NAME):$(CONTAINER_VERSION) -f ./Dockerfile $(GIT_PROJECT_ROOT)
diff --git a/k8s/images/prober/Dockerfile b/k8s/images/prober/Dockerfile
deleted file mode 100644
index d3d18ee8a6..0000000000
--- a/k8s/images/prober/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM ubuntu:bionic
-
-RUN apt-get update && apt-get install -y curl python-pip unzip
-
-RUN pip install requests>=2.20.0 && pip install psycopg2-binary==2.7.4 && pip install le-utils>=0.1.19
-
-COPY ./deploy/cloudprober.cfg /deploy/
-COPY ./deploy/prober-entrypoint.sh /deploy/
-COPY ./deploy/probers /deploy/probers/
diff --git a/k8s/templates/_helpers.tpl b/k8s/templates/_helpers.tpl
deleted file mode 100644
index 1098c07dc7..0000000000
--- a/k8s/templates/_helpers.tpl
+++ /dev/null
@@ -1,134 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "studio.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "studio.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{- define "cloudsql-proxy.fullname" -}}
-{{- $name := .Release.Name -}}
-{{- printf "%s-%s" $name "cloudsql-proxy" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- define "redis.fullname" -}}
-{{- $name := .Release.Name -}}
-{{- printf "%s-%s" $name "redis" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{- define "minio.url" -}}
-{{- printf "http://%s-%s:%v" .Release.Name "minio" .Values.minio.service.port -}}
-{{- end -}}
-
-
-{{/*
-Return the appropriate apiVersion for networkpolicy.
-*/}}
-{{- define "studio.networkPolicy.apiVersion" -}}
-{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
-"extensions/v1"
-{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}}
-"networking.k8s.io/v1"
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "studio.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Generate chart secret name
-*/}}
-{{- define "studio.secretName" -}}
-{{ default (include "studio.fullname" .) .Values.existingSecret }}
-{{- end -}}
-
-{{/*
-Generate the shared environment variables between studio app and workers
-*/}}
-{{- define "studio.sharedEnvs" -}}
-- name: DJANGO_SETTINGS_MODULE
- value: {{ .Values.settings }}
-- name: DJANGO_LOG_FILE
- value: /var/log/django.log
-- name: MPLBACKEND
- value: PS
-- name: STUDIO_BETA_MODE
- value: "yes"
-- name: RUN_MODE
- value: k8s
-- name: DATA_DB_NAME
- valueFrom:
- secretKeyRef:
- key: postgres-database
- name: {{ template "studio.fullname" . }}
-- name: DATA_DB_PORT
- value: "5432"
-- name: DATA_DB_USER
- valueFrom:
- secretKeyRef:
- key: postgres-user
- name: {{ template "studio.fullname" . }}
-- name: DATA_DB_PASS
- valueFrom:
- secretKeyRef:
- key: postgres-password
- name: {{ template "studio.fullname" . }}
-- name: CELERY_TIMEZONE
- value: America/Los_Angeles
-- name: CELERY_REDIS_DB
- value: "0"
-- name: CELERY_BROKER_ENDPOINT
- value: {{ template "redis.fullname" . }}-master
-- name: CELERY_RESULT_BACKEND_ENDPOINT
- value: {{ template "redis.fullname" . }}-master
-- name: CELERY_REDIS_PASSWORD
- valueFrom:
- secretKeyRef:
- key: redis-password
- name: {{ template "studio.fullname" . }}
-- name: AWS_S3_ENDPOINT_URL
- value: https://storage.googleapis.com
-- name: RELEASE_COMMIT_SHA
- value: {{ .Values.studioApp.releaseCommit | default "" }}
-- name: BRANCH_ENVIRONMENT
- value: {{ .Release.Name }}
-- name: SENTRY_DSN_KEY
- valueFrom:
- secretKeyRef:
- key: sentry-dsn-key
- name: {{ template "studio.fullname" . }}
- optional: true
-- name: AWS_BUCKET_NAME
- value: {{ .Values.studioApp.gcs.bucketName }}
-- name: EMAIL_CREDENTIALS_POSTMARK_API_KEY
- {{ if .Values.studioApp.postmarkApiKey }}
- valueFrom:
- secretKeyRef:
- key: postmark-api-key
- name: {{ template "studio.fullname" . }}
- {{ else }}
- value: ""
- {{ end }}
-
-{{- end -}}
diff --git a/k8s/templates/garbage-collect-cronjob.yaml b/k8s/templates/garbage-collect-cronjob.yaml
deleted file mode 100644
index 4395732541..0000000000
--- a/k8s/templates/garbage-collect-cronjob.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "studio.fullname" . }}-garbage-collect-job-config
- labels:
- tier: job
- app: {{ template "studio.fullname" . }}
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-data:
- DJANGO_LOG_FILE: /var/log/django.log
- DATA_DB_HOST: {{ template "cloudsql-proxy.fullname" . }}
- DATA_DB_PORT: "5432"
- MPLBACKEND: PS
- RUN_MODE: k8s
- RELEASE_COMMIT_SHA: {{ .Values.studioApp.releaseCommit | default "" }}
- BRANCH_ENVIRONMENT: {{ .Release.Name }}
- AWS_BUCKET_NAME: {{ .Values.studioApp.gcs.bucketName }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "studio.fullname" . }}-garbage-collect-job-secret
- labels:
- app: {{ template "studio.fullname" . }}
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-type: Opaque
-data:
- DATA_DB_USER: {{ index .Values "cloudsql-proxy" "credentials" "username" | b64enc }}
- DATA_DB_PASS: {{ index .Values "cloudsql-proxy" "credentials" "password" | b64enc }}
- DATA_DB_NAME: {{ index .Values "cloudsql-proxy" "credentials" "dbname" | b64enc }}
- SENTRY_DSN_KEY: {{ .Values.sentry.dsnKey | b64enc }}
----
-apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
- name: {{ template "studio.fullname" . }}-garbage-collect-cronjob
- labels:
- tier: job
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-spec:
- schedule: "@midnight"
- jobTemplate:
- spec:
- template:
- spec:
- restartPolicy: OnFailure
- containers:
- - name: app
- image: {{ .Values.studioApp.imageName }}
- command:
- - python
- - contentcuration/manage.py
- - garbage_collect
- env:
- - name: DJANGO_SETTINGS_MODULE
- value: contentcuration.production_settings
- envFrom:
- - configMapRef:
- name: {{ template "studio.fullname" . }}-garbage-collect-job-config
- - secretRef:
- name: {{ template "studio.fullname" . }}-garbage-collect-job-secret
- resources:
- requests:
- cpu: 0.5
- memory: 1Gi
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: full-gcp-access-scope
- operator: In
- values:
- - "true"
diff --git a/k8s/templates/ingress.yaml b/k8s/templates/ingress.yaml
deleted file mode 100644
index c2e199dc7a..0000000000
--- a/k8s/templates/ingress.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-apiVersion: networking.k8s.io/v1beta1
-kind: Ingress
-metadata:
- name: {{ template "studio.fullname" . }}
- labels:
- app: {{ template "studio.fullname" . }}
- tier: ingress
- annotations:
- ingress.kubernetes.io/rewrite-target: /
- kubernetes.io/ingress.class: "nginx"
- ingressClassName: "nginx"
-
-spec:
- rules:
- - host: {{.Release.Name}}.studio.cd.learningequality.org
- http:
- paths:
- - backend:
- serviceName: {{ template "studio.fullname" . }}-app
- servicePort: 80
diff --git a/k8s/templates/job-template.yaml b/k8s/templates/job-template.yaml
deleted file mode 100644
index 856f27371c..0000000000
--- a/k8s/templates/job-template.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "studio.fullname" . }}-db-migrate-config
- labels:
- app: {{ template "studio.fullname" . }}
- annotations:
- "helm.sh/hook": pre-install,pre-upgrade
- "helm.sh/hook-delete-policy": before-hook-creation
-data:
- DJANGO_SETTINGS_MODULE: {{ .Values.settings }}
- DJANGO_LOG_FILE: /var/log/django.log
- DATA_DB_HOST: {{ template "cloudsql-proxy.fullname" . }}
- DATA_DB_PORT: "5432"
- MPLBACKEND: PS
- STUDIO_BETA_MODE: "yes"
- RUN_MODE: k8s
- RELEASE_COMMIT_SHA: {{ .Values.studioApp.releaseCommit | default "" }}
- BRANCH_ENVIRONMENT: {{ .Release.Name }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "studio.fullname" . }}-db-migrate-secrets
- labels:
- app: studio
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
- annotations:
- "helm.sh/hook": pre-install,pre-upgrade
- "helm.sh/hook-delete-policy": before-hook-creation
-type: Opaque
-data:
- DATA_DB_USER: {{ index .Values "cloudsql-proxy" "credentials" "username" | b64enc }}
- DATA_DB_PASS: {{ index .Values "cloudsql-proxy" "credentials" "password" | b64enc }}
- DATA_DB_NAME: {{ index .Values "cloudsql-proxy" "credentials" "dbname" | b64enc }}
- SENTRY_DSN_KEY: {{ .Values.sentry.dsnKey | b64enc }}
----
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: {{ template "studio.fullname" . }}-migrate-job
- labels:
- app: {{ template "studio.fullname" . }}
- annotations:
- "helm.sh/hook": post-install,pre-upgrade
- "helm.sh/hook-delete-policy": before-hook-creation
-spec:
- template:
- spec:
- restartPolicy: OnFailure
- containers:
- - name: dbmigrate
- image: {{ .Values.studioApp.imageName }}
- command:
- - make
- - migrate
- envFrom:
- - configMapRef:
- name: {{ template "studio.fullname" . }}-db-migrate-config
- - secretRef:
- name: {{ template "studio.fullname" . }}-db-migrate-secrets
- env:
- - name: DJANGO_SETTINGS_MODULE
- value: contentcuration.migration_production_settings
- resources:
- requests:
- cpu: 1
- memory: 2Gi
- limits:
- cpu: 1
- memory: 2Gi
diff --git a/k8s/templates/mark-incomplete-mgmt-command-cronjob.yaml b/k8s/templates/mark-incomplete-mgmt-command-cronjob.yaml
deleted file mode 100644
index ad36b8b0e4..0000000000
--- a/k8s/templates/mark-incomplete-mgmt-command-cronjob.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "studio.fullname" . }}-mark-incomplete-job-config
- labels:
- tier: job
- app: {{ template "studio.fullname" . }}
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-data:
- DJANGO_LOG_FILE: /var/log/django.log
- DATA_DB_HOST: {{ template "cloudsql-proxy.fullname" . }}
- DATA_DB_PORT: "5432"
- MPLBACKEND: PS
- RUN_MODE: k8s
- RELEASE_COMMIT_SHA: {{ .Values.studioApp.releaseCommit | default "" }}
- BRANCH_ENVIRONMENT: {{ .Release.Name }}
- AWS_BUCKET_NAME: {{ .Values.studioApp.gcs.bucketName }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "studio.fullname" . }}-mark-incomplete-job-secrets
- labels:
- app: {{ template "studio.fullname" . }}
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-type: Opaque
-data:
- DATA_DB_USER: {{ index .Values "cloudsql-proxy" "credentials" "username" | b64enc }}
- DATA_DB_PASS: {{ index .Values "cloudsql-proxy" "credentials" "password" | b64enc }}
- DATA_DB_NAME: {{ index .Values "cloudsql-proxy" "credentials" "dbname" | b64enc }}
- SENTRY_DSN_KEY: {{ .Values.sentry.dsnKey | b64enc }}
----
-apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
- name: mark-incomplete-cronjob
- labels:
- tier: job
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-spec:
- schedule: "00 12 10 */36 *"
- jobTemplate:
- spec:
- template:
- spec:
- restartPolicy: OnFailure
- containers:
- - name: app
- image: {{ .Values.studioApp.imageName }}
- command:
- - python
- - contentcuration/manage.py
- - mark_incomplete
- env:
- - name: DJANGO_SETTINGS_MODULE
- value: contentcuration.production_settings
- envFrom:
- - configMapRef:
- name: {{ template "studio.fullname" . }}-mark-incomplete-job-config
- - secretRef:
- name: {{ template "studio.fullname" . }}-mark-incomplete-job-secrets
- resources:
- requests:
- cpu: 0.5
- memory: 1Gi
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: full-gcp-access-scope
- operator: In
- values:
- - "true"
diff --git a/k8s/templates/production-ingress.yaml b/k8s/templates/production-ingress.yaml
deleted file mode 100644
index 68f10481ba..0000000000
--- a/k8s/templates/production-ingress.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-{{- if .Values.productionIngress -}}
----
-apiVersion: networking.k8s.io/v1beta1
-kind: Ingress
-metadata:
- name: {{ template "studio.fullname" . }}-production
- labels:
- app: {{ template "studio.fullname" . }}
- tier: ingress
- type: production
- annotations:
- ingress.kubernetes.io/rewrite-target: /
- kubernetes.io/ingress.class: "nginx"
- ingressClassName: "nginx"
-spec:
- rules:
- - host: {{.Release.Name}}.studio.learningequality.org
- http:
- paths:
- - backend:
- serviceName: {{ template "studio.fullname" . }}-app
- servicePort: 80
-{{- end }}
diff --git a/k8s/templates/set-storage-used-mgmt-command-cronjob.yaml b/k8s/templates/set-storage-used-mgmt-command-cronjob.yaml
deleted file mode 100644
index cd30ba6f2f..0000000000
--- a/k8s/templates/set-storage-used-mgmt-command-cronjob.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "studio.fullname" . }}-set-storage-used-job-config
- labels:
- tier: job
- app: {{ template "studio.fullname" . }}
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-data:
- DJANGO_LOG_FILE: /var/log/django.log
- DATA_DB_HOST: {{ template "cloudsql-proxy.fullname" . }}
- DATA_DB_PORT: "5432"
- MPLBACKEND: PS
- RUN_MODE: k8s
- RELEASE_COMMIT_SHA: {{ .Values.studioApp.releaseCommit | default "" }}
- BRANCH_ENVIRONMENT: {{ .Release.Name }}
- AWS_BUCKET_NAME: {{ .Values.studioApp.gcs.bucketName }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "studio.fullname" . }}-set-storage-used-job-secrets
- labels:
- app: {{ template "studio.fullname" . }}
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-type: Opaque
-data:
- DATA_DB_USER: {{ index .Values "cloudsql-proxy" "credentials" "username" | b64enc }}
- DATA_DB_PASS: {{ index .Values "cloudsql-proxy" "credentials" "password" | b64enc }}
- DATA_DB_NAME: {{ index .Values "cloudsql-proxy" "credentials" "dbname" | b64enc }}
- SENTRY_DSN_KEY: {{ .Values.sentry.dsnKey | b64enc }}
----
-apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
- name: set-storage-used-cronjob
- labels:
- tier: job
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-spec:
- schedule: "@midnight"
- jobTemplate:
- spec:
- template:
- spec:
- restartPolicy: OnFailure
- containers:
- - name: app
- image: {{ .Values.studioApp.imageName }}
- command:
- - python
- - contentcuration/manage.py
- - set_storage_used
- env:
- - name: DJANGO_SETTINGS_MODULE
- value: contentcuration.production_settings
- envFrom:
- - configMapRef:
- name: {{ template "studio.fullname" . }}-set-storage-used-job-config
- - secretRef:
- name: {{ template "studio.fullname" . }}-set-storage-used-job-secrets
- resources:
- requests:
- cpu: 0.5
- memory: 1Gi
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: full-gcp-access-scope
- operator: In
- values:
- - "true"
diff --git a/k8s/templates/studio-deployment.yaml b/k8s/templates/studio-deployment.yaml
deleted file mode 100644
index f6a74f36fa..0000000000
--- a/k8s/templates/studio-deployment.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ template "studio.fullname" . }}
- labels:
- tier: app
- app: {{ template "studio.fullname" . }}
-spec:
- replicas: {{ .Values.studioApp.replicas }}
- selector:
- matchLabels:
- app: {{ template "studio.fullname" . }}
- tier: frontend
- template:
- metadata:
- annotations:
- checksum: {{ include (print $.Template.BasePath "/job-template.yaml") . | sha256sum }}
- labels:
- app: {{ template "studio.fullname" . }}
- tier: frontend
- spec:
- initContainers:
- - name: collectstatic
- image: {{ .Values.studioApp.imageName }}
- workingDir: /contentcuration/
- command:
- - make
- args:
- - collectstatic
- env:
- - name: DJANGO_SETTINGS_MODULE
- value: contentcuration.collectstatic_settings
- - name: STATICFILES_DIR
- value: /app/contentworkshop_static/
- volumeMounts:
- - mountPath: /app/contentworkshop_static/
- name: staticfiles
- containers:
- - name: app
- image: {{ .Values.studioApp.imageName }}
- workingDir: /contentcuration/contentcuration/
- command:
- - gunicorn
- args:
- - contentcuration.wsgi:application
- - --timeout=4000
- - --workers=2
- - --bind=0.0.0.0:{{ .Values.studioApp.appPort }}
- - --pid=/tmp/contentcuration.pid
- env: {{ include "studio.sharedEnvs" . | nindent 8 }}
- - name: SEND_USER_ACTIVATION_NOTIFICATION_EMAIL
- value: "true"
- - name: DATA_DB_HOST
- value: {{ template "cloudsql-proxy.fullname" . }}
- - name: GOOGLE_CLOUD_STORAGE_SERVICE_ACCOUNT_CREDENTIALS
- value: /var/secrets/gcs-writer-service-account-key.json
- ports:
- - containerPort: {{ .Values.studioApp.appPort }}
- readinessProbe:
- httpGet:
- path: /healthz
- port: {{ .Values.studioApp.appPort }}
- initialDelaySeconds: 5
- periodSeconds: 2
- failureThreshold: 3
- resources:
- requests:
- cpu: 0.5
- memory: 2Gi
- limits:
- memory: 2Gi
- volumeMounts:
- - mountPath: /var/secrets
- name: gcs-writer-service-account-key
- readOnly: true
- - name: nginx-proxy
- image: {{ .Values.studioNginx.imageName }}
- env:
- - name: AWS_S3_ENDPOINT_URL
- value: https://storage.googleapis.com
- - name: AWS_BUCKET_NAME
- value: {{ .Values.studioApp.gcs.bucketName }}
- ports:
- - containerPort: {{ .Values.studioNginx.port }}
- volumeMounts:
- - mountPath: /app/contentworkshop_static/
- name: staticfiles
- resources:
- requests:
- cpu: 0.2
- memory: 256Mi
- limits:
- memory: 512Mi
- volumes:
- - emptyDir: {}
- name: staticfiles
- - name: gcs-writer-service-account-key
- secret:
- secretName: {{ template "studio.fullname" . }}
- items:
- - key: gcs-writer-service-account-key
- path: gcs-writer-service-account-key.json
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: full-gcp-access-scope
- operator: In
- values:
- - "true"
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{template "studio.fullname" . }}-workers
-spec:
- replicas: {{ .Values.studioWorkers.replicas }}
- selector:
- matchLabels:
- app: {{ template "studio.fullname" . }}-workers
- tier: workers
- template:
- metadata:
- labels:
- app: {{ template "studio.fullname" . }}-workers
- tier: workers
- spec:
- containers:
- - name: worker
- image: {{ .Values.studioApp.imageName }}
- command:
- - make
- {{- if not .Values.productionIngress }}
- - setup
- {{- end }}
- - prodceleryworkers
- env: {{ include "studio.sharedEnvs" . | nindent 8 }}
- - name: DATA_DB_HOST
- value: {{ template "cloudsql-proxy.fullname" . }}
- resources:
- requests:
- cpu: 0.5
- memory: 2Gi
- limits:
- cpu: 2
- memory: 8Gi
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: full-gcp-access-scope
- operator: In
- values:
- - "true"
diff --git a/k8s/templates/studio-secrets.yaml b/k8s/templates/studio-secrets.yaml
deleted file mode 100644
index 51f2589a3e..0000000000
--- a/k8s/templates/studio-secrets.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "studio.fullname" . }}
- labels:
- app: studio
- chart: {{ .Chart.Name }}
- release: {{ .Release.Name }}
-type: Opaque
-data:
- postmark-api-key: {{ .Values.studioApp.postmarkApiKey | default "" | b64enc }}
- redis-password: {{ .Values.redis.password | default "" | b64enc }}
- postgres-user: {{ index .Values "cloudsql-proxy" "credentials" "username" | b64enc }}
- postgres-password: {{ index .Values "cloudsql-proxy" "credentials" "password" | b64enc }}
- postgres-database: {{ index .Values "cloudsql-proxy" "credentials" "dbname" | b64enc }}
- sentry-dsn-key: {{ .Values.sentry.dsnKey | b64enc }}
- gcs-writer-service-account-key: {{ .Values.studioApp.gcs.writerServiceAccountKeyBase64Encoded }}
diff --git a/k8s/templates/studio-service.yaml b/k8s/templates/studio-service.yaml
deleted file mode 100644
index 8f70e6f54d..0000000000
--- a/k8s/templates/studio-service.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ template "studio.fullname" . }}-app
-spec:
- ports:
- - port: 80
- targetPort: {{ .Values.studioNginx.port }}
- selector:
- app: {{ template "studio.fullname" . }}
- tier: frontend
- type: NodePort
diff --git a/k8s/values.yaml b/k8s/values.yaml
deleted file mode 100644
index 11db6c1559..0000000000
--- a/k8s/values.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-# A set of values that are meant to be used for a production setup.
-# This includes:
-# - an external Postgres, GCS Storage, and external Redis
-# - real email sending
-# - studio production settings
-#
-# Note that the secrets will have to be filled up by the caller
-# through helm upgrade --set. See REPLACEME placeholders
-# for values that need to be set.
-
-settings: contentcuration.sandbox_settings
-
-productionIngress: true
-
-studioApp:
- imageName: "REPLACEME"
- postmarkApiKey: "REPLACEME"
- releaseCommit: ""
- replicas: 5
- appPort: 8081
- gcs:
- bucketName: develop-studio-content
- writerServiceAccountKeyBase64Encoded: "REPLACEME"
- pgbouncer:
- replicas: 3
- pool_size: 10
- reserve_pool_size: 10
-
-studioNginx:
- imageName: "REPLACEME"
- port: 8080
-
-sentry:
- dsnKey: ""
-
-cloudsql-proxy:
- enabled: true
- cloudsql:
- instances:
- - instance: "REPLACEME"
- project: "REPLACEME"
- region: "REPLACEME"
- port: 5432
- credentials:
- username: ""
- password: ""
- dbname: ""
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: full-gcp-access-scope
- operator: In
- values:
- - "true"
-
-redis:
- enabled: true
-
-studioWorkers:
- replicas: 5
-
-
-studioProber:
- imageName: "REPLACEME"
- loginProberUsername: "REPLACEME"
- loginProberPassword: "REPLACEME"
- port: 9313
diff --git a/requirements.in b/requirements.in
index 27ca67281c..c86a1d37c3 100644
--- a/requirements.in
+++ b/requirements.in
@@ -5,7 +5,7 @@ djangorestframework==3.15.1
psycopg2-binary==2.9.10
django-js-reverse==0.10.2
django-registration==3.4
-le-utils>=0.2.12
+le-utils==0.2.14
gunicorn==23.0.0
django-postmark==0.1.6
jsonfield==3.1.0
diff --git a/requirements.txt b/requirements.txt
index 9863c77097..2e227661d1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -162,7 +162,7 @@ language-data==1.3.0
# via langcodes
latex2mathml==3.78.0
# via -r requirements.in
-le-utils==0.2.12
+le-utils==0.2.14
# via -r requirements.in
marisa-trie==1.2.1
# via language-data