diff --git a/.env b/.env index 6fd79cd1..f974af57 100644 --- a/.env +++ b/.env @@ -1,12 +1,21 @@ -OL_REFERENCE_UI_VERSION=5.2.7-SNAPSHOT - -OL_REQUISITION_VERSION=8.3.7-SNAPSHOT -OL_REFERENCEDATA_VERSION=15.2.6-SNAPSHOT -OL_AUTH_VERSION=4.3.4-SNAPSHOT -OL_NOTIFICATION_VERSION=4.3.4-SNAPSHOT -OL_FULFILLMENT_VERSION=9.0.5-SNAPSHOT +OL_REFERENCE_UI_VERSION=1.4.1 +#OL_REQUISITION_VERSION=8.3.7-SNAPSHOT +OL_REQUISITION_VERSION=1.0.7 +OL_REFERENCEDATA_VERSION=1.0.1 +OL_AUTH_VERSION=0.0.1 +OL_NOTIFICATION_VERSION=1.0.7 +#OL_FULFILLMENT_VERSION=9.0.5-SNAPSHOT +OL_FULFILLMENT_VERSION=1.0.0 OL_CCE_VERSION=1.3.3-SNAPSHOT -OL_STOCKMANAGEMENT_VERSION=5.1.10-SNAPSHOT +OL_STOCKMANAGEMENT_VERSION=1.1.6 +#curr stock 1.1.4 +OL_PREPACKING_VERSION=1.0.9 +#OL_PREPACKING_VERSION=latest +OL_DISPENSING_VERSION=1.1.1 +#OL_STOCKMANAGEMENT_VERSION=latest +#OL_POINTOFDELIVERY_VERSION=1.0.1 +OL_POINTOFDELIVERY_VERSION=1.0.6 +OL_INVENTORYMANAGEMENT_VERSION=latest OL_REPORT_VERSION=1.2.3-SNAPSHOT OL_BUQ_VERSION=1.0.0-SNAPSHOT OL_HAPIFHIR_VERSION=2.0.3-SNAPSHOT @@ -16,7 +25,8 @@ OL_ONE_NETWORK_INTEGRATION_STOCKMANAGEMENT_EXTENSION_VERSION=0.0.1 OL_DIAGNOSTICS_VERSION=1.1.3-SNAPSHOT -OL_NGINX_VERSION=5 +#OL_NGINX_VERSION=5 +OL_NGINX_VERSION=1.0.0 OL_RSYSLOG_VERSION=1 -OL_POSTGRES_VERSION=12-debezium \ No newline at end of file +OL_POSTGRES_VERSION=12-debezium diff --git a/docker-compose.openlmis-dev.yml b/docker-compose.openlmis-dev.yml new file mode 100644 index 00000000..88c8d781 --- /dev/null +++ b/docker-compose.openlmis-dev.yml @@ -0,0 +1,260 @@ +version: "3.3" +services: + + consul: + command: -server -bootstrap + image: gliderlabs/consul-server + ports: + - "8300" + - "8400" + - "8500:8500" + - "53" + + nginx: + #image: openlmis/nginx:${OL_NGINX_VERSION} + image: elmislesotho/nginx:${OL_NGINX_VERSION} + ports: + - "${OL_HTTP_PORT:-80}:80" + env_file: settings.env + environment: + NGINX_LOG_DIR: '/var/log/nginx/log' + NGINX_TIMEOUT: '300s' + volumes: + - 'nginx-log:/var/log/nginx/log' + - 
'consul-template-log:/var/log/consul-template' + depends_on: [consul] + + reference-ui: + image: elmislesotho/reference-ui:${OL_REFERENCE_UI_VERSION} + env_file: settings.env + depends_on: [consul] + + requisition: + image: elmislesotho/requisition:${OL_REQUISITION_VERSION} + env_file: settings.env + environment: + #JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + JAVA_OPTS: > + -server + -Xms1024m + -Xmx3072m + -XX:+UseG1GC + -XX:MetaspaceSize=128m + -XX:MaxMetaspaceSize=256m + -Xss512k + -XX:+PrintGCDetails + -XX:+PrintGCDateStamps + -Xloggc:/var/log/requisition/gc.log + -Dlogging.config=/config/log/logback.xml + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + referencedata: + image: elmislesotho/referencedata:${OL_REFERENCEDATA_VERSION} + env_file: settings.env + environment: + #JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + JAVA_OPTS: > + -server + -Xms1024m + -Xmx3072m + -XX:+UseG1GC + -XX:MetaspaceSize=128m + -XX:MaxMetaspaceSize=256m + -Xss512k + -XX:+PrintGCDetails + -XX:+PrintGCDateStamps + -Xloggc:/var/log/referencedata/gc.log + -Dlogging.config=/config/log/logback.xml + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + auth: + image: elmislesotho/auth:${OL_AUTH_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + notification: + image: elmislesotho/notification:${OL_NOTIFICATION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + fulfillment: + image: elmislesotho/fulfillment:${OL_FULFILLMENT_VERSION} + 
env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + cce: + image: openlmis/cce:${OL_CCE_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + stockmanagement: + image: elmislesotho/stockmanagement:${OL_STOCKMANAGEMENT_VERSION} + env_file: settings.env + environment: + #JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + JAVA_OPTS: > + -server + -Xms1024m + -Xmx3072m + -XX:+UseG1GC + -XX:MetaspaceSize=128m + -XX:MaxMetaspaceSize=256m + -Xss512k + -XX:+PrintGCDetails + -XX:+PrintGCDateStamps + -Xloggc:/var/log/stockmanagement/gc.log + -Dlogging.config=/config/log/logback.xml + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + # prepacking: + # image: elmislesotho/prepacking:${OL_PREPACKING_VERSION} + # env_file: settings.env + # environment: + # JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + # volumes: + # - 'service-config:/config' + # depends_on: [log, db] + # command: ["/wait-for-postgres.sh", "/run.sh"] + + # dispensing: + # image: elmislesotho/dispensing:${OL_DISPENSING_VERSION} + # env_file: settings.env + # environment: + # JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + # volumes: + # - 'service-config:/config' + # depends_on: [log, db] + # command: ["/wait-for-postgres.sh", "/run.sh"] + + report: + image: openlmis/report:${OL_REPORT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + buq: + 
image: openlmis/buq:${OL_BUQ_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [ log, db ] + command: [ "/wait-for-postgres.sh", "/run.sh" ] + + hapifhir: + restart: always + image: openlmis/hapifhir:${OL_HAPIFHIR_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + diagnostics: + image: openlmis/diagnostics:${OL_DIAGNOSTICS_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log] + + db: + image: openlmis/postgres:${OL_POSTGRES_VERSION} + env_file: settings.env + networks: + default: + aliases: + - olmis-db + depends_on: [consul] + + log: + image: openlmis/rsyslog:${OL_RSYSLOG_VERSION} + volumes: + - 'syslog:/var/log' + depends_on: + - service-configuration + - consul + + service-configuration: + build: + context: ./config + volumes: + - service-config:/config + + ftp: + image: driesva/proftpd:latest + ports: + - "${OL_FTP_PORT_21:-21}:21" + - "${OL_FTP_PORT_20:-20}:20" + env_file: settings.env + depends_on: [consul] + + redis: + image: redis:3.2.12 + depends_on: [consul] + + pointofdelivery: + image: elmislesotho/pointofdelivery:${OL_POINTOFDELIVERY_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'pointofdelivery-extensions:/extensions' + - 'service-config:/config' + depends_on: [log, db] + + # example-extensions: + # image: openlmis/openlmis-example-extensions:latest + # volumes: + # - 'example-extensions:/extensions' + +volumes: + syslog: + 
external: false + nginx-log: + external: false + consul-template-log: + external: false + service-config: + external: false + pointofdelivery-extensions: + external: false diff --git a/docker-compose.openlmis-dev.yml.beforeTuning_10July2024 b/docker-compose.openlmis-dev.yml.beforeTuning_10July2024 new file mode 100644 index 00000000..5b96f544 --- /dev/null +++ b/docker-compose.openlmis-dev.yml.beforeTuning_10July2024 @@ -0,0 +1,222 @@ +version: "3.3" +services: + + consul: + command: -server -bootstrap + image: gliderlabs/consul-server + ports: + - "8300" + - "8400" + - "8500:8500" + - "53" + + nginx: + image: openlmis/nginx:${OL_NGINX_VERSION} + ports: + - "${OL_HTTP_PORT:-80}:80" + env_file: settings.env + environment: + NGINX_LOG_DIR: '/var/log/nginx/log' + volumes: + - 'nginx-log:/var/log/nginx/log' + - 'consul-template-log:/var/log/consul-template' + depends_on: [consul] + + reference-ui: + image: elmislesotho/reference-ui:${OL_REFERENCE_UI_VERSION} + env_file: settings.env + depends_on: [consul] + + requisition: + image: elmislesotho/requisition:${OL_REQUISITION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + referencedata: + image: openlmis/referencedata:${OL_REFERENCEDATA_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + auth: + image: openlmis/auth:${OL_AUTH_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + notification: + image: elmislesotho/notification:${OL_NOTIFICATION_VERSION} + env_file: 
settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + fulfillment: + image: openlmis/fulfillment:${OL_FULFILLMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + cce: + image: openlmis/cce:${OL_CCE_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + stockmanagement: + image: elmislesotho/stockmanagement:${OL_STOCKMANAGEMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + prepacking: + image: elmislesotho/prepacking:${OL_PREPACKING_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + dispensing: + image: elmislesotho/dispensing:${OL_DISPENSING_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + report: + image: openlmis/report:${OL_REPORT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + buq: + image: 
openlmis/buq:${OL_BUQ_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [ log, db ] + command: [ "/wait-for-postgres.sh", "/run.sh" ] + + hapifhir: + restart: always + image: openlmis/hapifhir:${OL_HAPIFHIR_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + diagnostics: + image: openlmis/diagnostics:${OL_DIAGNOSTICS_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log] + + db: + image: openlmis/postgres:${OL_POSTGRES_VERSION} + env_file: settings.env + networks: + default: + aliases: + - olmis-db + depends_on: [consul] + + log: + image: openlmis/rsyslog:${OL_RSYSLOG_VERSION} + volumes: + - 'syslog:/var/log' + depends_on: + - service-configuration + - consul + + service-configuration: + build: + context: ./config + volumes: + - service-config:/config + + ftp: + image: hauptmedia/proftpd + ports: + - "${OL_FTP_PORT_21:-21}:21" + - "${OL_FTP_PORT_20:-20}:20" + env_file: settings.env + depends_on: [consul] + + redis: + image: redis:3.2.12 + depends_on: [consul] + + pointofdelivery: + image: elmislesotho/pointofdelivery:${OL_POINTOFDELIVERY_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'pointofdelivery-extensions:/extensions' + - 'service-config:/config' + depends_on: [log, db] + + # example-extensions: + # image: openlmis/openlmis-example-extensions:latest + # volumes: + # - 'example-extensions:/extensions' + +volumes: + syslog: + external: 
false + nginx-log: + external: false + consul-template-log: + external: false + service-config: + external: false + pointofdelivery-extensions: + external: false diff --git a/docker-compose.openlmis-pointofdelivery-init.yml b/docker-compose.openlmis-pointofdelivery-init.yml new file mode 100644 index 00000000..7e65254f --- /dev/null +++ b/docker-compose.openlmis-pointofdelivery-init.yml @@ -0,0 +1,202 @@ +version: "3.3" +services: + + consul: + command: -server -bootstrap + image: gliderlabs/consul-server + ports: + - "8300" + - "8400" + - "8500:8500" + - "53" + + nginx: + image: openlmis/nginx:${OL_NGINX_VERSION} + ports: + - "${OL_HTTP_PORT:-80}:80" + env_file: settings.env + environment: + NGINX_LOG_DIR: '/var/log/nginx/log' + volumes: + - 'nginx-log:/var/log/nginx/log' + - 'consul-template-log:/var/log/consul-template' + depends_on: [consul] + + reference-ui: + image: elmislesotho/reference-ui:${OL_REFERENCE_UI_VERSION} + env_file: settings.env + depends_on: [consul] + + requisition: + image: openlmis/requisition:${OL_REQUISITION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + referencedata: + image: openlmis/referencedata:${OL_REFERENCEDATA_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + auth: + image: openlmis/auth:${OL_AUTH_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + notification: + image: openlmis/notification:${OL_NOTIFICATION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: 
'-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + fulfillment: + image: openlmis/fulfillment:${OL_FULFILLMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + cce: + image: openlmis/cce:${OL_CCE_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + stockmanagement: + image: elmislesotho/stockmanagement:${OL_STOCKMANAGEMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + report: + image: openlmis/report:${OL_REPORT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + buq: + image: openlmis/buq:${OL_BUQ_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [ log, db ] + command: [ "/wait-for-postgres.sh", "/run.sh" ] + + hapifhir: + restart: always + image: openlmis/hapifhir:${OL_HAPIFHIR_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + diagnostics: + 
image: openlmis/diagnostics:${OL_DIAGNOSTICS_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log] + + db: + image: openlmis/postgres:${OL_POSTGRES_VERSION} + env_file: settings.env + networks: + default: + aliases: + - olmis-db + depends_on: [consul] + + log: + image: openlmis/rsyslog:${OL_RSYSLOG_VERSION} + volumes: + - 'syslog:/var/log' + depends_on: + - service-configuration + - consul + + service-configuration: + build: + context: ./config + volumes: + - service-config:/config + + ftp: + image: hauptmedia/proftpd + ports: + - "${OL_FTP_PORT_21:-21}:21" + - "${OL_FTP_PORT_20:-20}:20" + env_file: settings.env + depends_on: [consul] + + redis: + image: redis:3.2.12 + depends_on: [consul] + + pointofdelivery: + image: elmislesotho/pointofdelivery:${OL_POINTOFDELIVERY_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'pointofdelivery-extensions:/extensions' + - 'service-config:/config' + depends_on: [log, db] + + # example-extensions: + # image: openlmis/openlmis-example-extensions:latest + # volumes: + # - 'example-extensions:/extensions' + +volumes: + syslog: + external: false + nginx-log: + external: false + consul-template-log: + external: false + service-config: + external: false + pointofdelivery-extensions: + external: false diff --git a/docker-compose.openlmis-prod.yml b/docker-compose.openlmis-prod.yml new file mode 100644 index 00000000..2237d067 --- /dev/null +++ b/docker-compose.openlmis-prod.yml @@ -0,0 +1,205 @@ +version: "3.3" +services: + + consul: + command: -server -bootstrap + image: gliderlabs/consul-server + ports: + - "8300" + - "8400" + - "8500:8500" + - "53" + + nginx: + image: openlmis/nginx:${OL_NGINX_VERSION} + ports: + - "${OL_HTTP_PORT:-80}:80" + env_file: settings.env + 
environment: + NGINX_LOG_DIR: '/var/log/nginx/log' + volumes: + - 'nginx-log:/var/log/nginx/log' + - 'consul-template-log:/var/log/consul-template' + depends_on: [consul] + + reference-ui: + image: elmislesotho/reference-ui:${OL_REFERENCE_UI_VERSION} + env_file: settings.env + depends_on: [consul] + + requisition: + image: openlmis/requisition:${OL_REQUISITION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + referencedata: + image: openlmis/referencedata:${OL_REFERENCEDATA_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + auth: + image: openlmis/auth:${OL_AUTH_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + notification: + image: openlmis/notification:${OL_NOTIFICATION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + fulfillment: + image: openlmis/fulfillment:${OL_FULFILLMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + cce: + image: openlmis/cce:${OL_CCE_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: 
["/wait-for-postgres.sh", "/run.sh"] + + stockmanagement: + image: elmislesotho/stockmanagement:${OL_STOCKMANAGEMENT_VERSION} + #image: openlmis/stockmanagement:latest + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + report: + image: openlmis/report:${OL_REPORT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + buq: + image: openlmis/buq:${OL_BUQ_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [ log] + command: [ "/wait-for-postgres.sh", "/run.sh" ] + + hapifhir: + restart: always + image: openlmis/hapifhir:${OL_HAPIFHIR_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log] + command: ["/wait-for-postgres.sh", "/run.sh"] + + diagnostics: + image: openlmis/diagnostics:${OL_DIAGNOSTICS_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log] + + # db: + # image: openlmis/postgres:${OL_POSTGRES_VERSION} + # env_file: settings.env + # networks: + # default: + # aliases: + # - olmis-db + # depends_on: [consul] + + log: + image: openlmis/rsyslog:${OL_RSYSLOG_VERSION} + volumes: + - 'syslog:/var/log' + depends_on: + - service-configuration + - consul + + service-configuration: + build: + context: ./config + volumes: + - service-config:/config 
+ + ftp: + image: hauptmedia/proftpd + ports: + - "${OL_FTP_PORT_21:-21}:21" + - "${OL_FTP_PORT_20:-20}:20" + env_file: settings.env + depends_on: [consul] + + redis: + image: redis:3.2.12 + depends_on: [consul] + + pointofdelivery: + #image: elmislesotho/pointofdelivery:${OL_POINTOFDELIVERY_VERSION} + image: elmislesotho/pointofdelivery:${OL_POINTOFDELIVERY_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'pointofdelivery-extensions:/extensions' + - 'service-config:/config' + depends_on: [log] + command: [ "/wait-for-postgres.sh", "/run.sh" ] + + # example-extensions: + # image: openlmis/openlmis-example-extensions:latest + # volumes: + # - 'example-extensions:/extensions' + +volumes: + syslog: + external: false + nginx-log: + external: false + consul-template-log: + external: false + service-config: + external: false + pointofdelivery-extensions: + external: false diff --git a/docker-compose.openlmis-ref-distro-example-service.yml b/docker-compose.openlmis-ref-distro-example-service.yml new file mode 100644 index 00000000..8df7e5cc --- /dev/null +++ b/docker-compose.openlmis-ref-distro-example-service.yml @@ -0,0 +1,202 @@ +version: "3.3" +services: + + consul: + command: -server -bootstrap + image: gliderlabs/consul-server + ports: + - "8300" + - "8400" + - "8500:8500" + - "53" + + nginx: + image: openlmis/nginx:${OL_NGINX_VERSION} + ports: + - "${OL_HTTP_PORT:-80}:80" + env_file: settings.env + environment: + NGINX_LOG_DIR: '/var/log/nginx/log' + volumes: + - 'nginx-log:/var/log/nginx/log' + - 'consul-template-log:/var/log/consul-template' + depends_on: [consul] + + reference-ui: + image: openlmis/reference-ui:${OL_REFERENCE_UI_VERSION} + env_file: settings.env + depends_on: [consul] + + requisition: + image: openlmis/requisition:${OL_REQUISITION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + 
volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + referencedata: + image: openlmis/referencedata:${OL_REFERENCEDATA_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + auth: + image: openlmis/auth:${OL_AUTH_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + notification: + image: openlmis/notification:${OL_NOTIFICATION_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + fulfillment: + image: openlmis/fulfillment:${OL_FULFILLMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + cce: + image: openlmis/cce:${OL_CCE_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + stockmanagement: + image: openlmis/stockmanagement:${OL_STOCKMANAGEMENT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + report: + image: openlmis/report:${OL_REPORT_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server 
-Xmx512m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + buq: + image: openlmis/buq:${OL_BUQ_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx1024m -Dlogging.config=/config/log/logback.xml' + volumes: + - 'service-config:/config' + depends_on: [ log, db ] + command: [ "/wait-for-postgres.sh", "/run.sh" ] + + hapifhir: + restart: always + image: openlmis/hapifhir:${OL_HAPIFHIR_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log, db] + command: ["/wait-for-postgres.sh", "/run.sh"] + + diagnostics: + image: openlmis/diagnostics:${OL_DIAGNOSTICS_VERSION} + env_file: settings.env + environment: + JAVA_OPTS: '-server -Xmx512m -Dlogging.config=/config/log/logback.xml' + spring_profiles_active: ${spring_profiles_active} + volumes: + - 'service-config:/config' + depends_on: [log] + + db: + image: openlmis/postgres:${OL_POSTGRES_VERSION} + env_file: settings.env + networks: + default: + aliases: + - olmis-db + depends_on: [consul] + + log: + image: openlmis/rsyslog:${OL_RSYSLOG_VERSION} + volumes: + - 'syslog:/var/log' + depends_on: + - service-configuration + - consul + + service-configuration: + build: + context: ./config + volumes: + - service-config:/config + + ftp: + image: hauptmedia/proftpd + ports: + - "${OL_FTP_PORT_21:-21}:21" + - "${OL_FTP_PORT_20:-20}:20" + env_file: settings.env + depends_on: [consul] + + redis: + image: redis:3.2.12 + depends_on: [consul] + + # example: + # image: openlmis/template-service:latest + # env_file: settings.env + # environment: + # JAVA_OPTS: '-Dlogging.config=/logback.xml' + # volumes: + # - 'example-extensions:/extensions' + # - './logback.xml:/logback.xml' + # depends_on: [log] + + # example-extensions: + # image: 
openlmis/openlmis-example-extensions:latest + # volumes: + # - 'example-extensions:/extensions' + +volumes: + syslog: + external: false + nginx-log: + external: false + consul-template-log: + external: false + service-config: + external: false + example-extensions: + external: false diff --git a/docker-compose.openlmis-stockmanagement-validator-extension.yml b/docker-compose.openlmis-stockmanagement-validator-extension.yml index 246ed8de..5849e059 100644 --- a/docker-compose.openlmis-stockmanagement-validator-extension.yml +++ b/docker-compose.openlmis-stockmanagement-validator-extension.yml @@ -87,6 +87,16 @@ services: depends_on: [log, db] command: ["/wait-for-postgres.sh", "/run.sh"] + example: + image: openlmis/template-service:latest + env_file: .env + environment: + JAVA_OPTS: '-Dlogging.config=/logback.xml' + volumes: + - 'example-extensions:/extensions' + - './logback.xml:/logback.xml' + depends_on: [log, example-extensions] + example-extensions: image: openlmis/openlmis-example-extensions:0.0.1-SNAPSHOT volumes: diff --git a/reporting/Caddyfile b/reporting/Caddyfile new file mode 100644 index 00000000..1ecf0de4 --- /dev/null +++ b/reporting/Caddyfile @@ -0,0 +1,12 @@ +dev.superset.elmis.gov.ls { + # Let Caddy auto-manage TLS for this hostname + tls admin@elmis.gov.ls + + # Bypass Nginx—direct to Superset container: + reverse_proxy superset:8088 { + header_up Host {host} + header_up X-Forwarded-Proto {scheme} + header_up X-Real-IP {remote} + header_up X-Forwarded-For {remote} + } +} diff --git a/reporting/README.md b/reporting/README.md index a63ac9a4..01da9fa6 100644 --- a/reporting/README.md +++ b/reporting/README.md @@ -13,8 +13,9 @@ $ cp settings-sample.env settings.env ``` * Edit `settings.env` to match your setup. Despite generating the passwords, you will likely need to change - `VIRTUAL_HOST`, `TRUSTED_HOSTNAME`, `OL_BASE_URL` (to point to OpenLMIS) and `NIFI_DOMAIN_NAME` and `SUPERSET_DOMAIN_NAME` (which should point to the reporting stack). 
+ `VIRTUAL_HOST`, `TRUSTED_HOSTNAME`, `OL_BASE_URL` (to point to OpenLMIS), `NIFI_DOMAIN_NAME` and `SUPERSET_DOMAIN_NAME` (which should point to the reporting stack) and `SUPERSET_URL` to point to the Superset URL. Details on all the environment variables are below. + * NB: `SUPERSET_URL` in the `settings.env` file in OpenLMIS should be updated to point to the Superset URL. 5. Bring up the reporting stack by running [docker-compose](https://docs.docker.com/compose/) on the server: ```sh @@ -43,14 +44,14 @@ Upgrading Superset to new version consist of two steps: ##### Updating configuration volume -The `./upgrade-superset-config.sh` script can be used to update the configuration volume in one go. -It must be executed on the Superset's Docker host system. +The `./upgrade-superset-config.sh` script can be used to update the configuration volume in one go. +It must be executed on the Superset's Docker host system. The scripts starts a 'dummy' container linked ot the config volume, and copies (with override) the new content of `./config/services/superset`. ##### Running new Docker image -There are no special considerations. +There are no special considerations. Start and build the Docker image included in this project as described in **Deploying to a Server** section. ## OAuth User for Superset @@ -60,8 +61,8 @@ It is the specific user with `authorizedGrantTypes` set to `authorization_code` Example of a SQL statement creating that user (superset:changeme): ``` -INSERT INTO auth.oauth_client_details (clientId,authorities,authorizedGrantTypes,clientSecret,"scope") -VALUES ('superset','TRUSTED_CLIENT','authorization_code','changeme','read,write'); +INSERT INTO auth.oauth_client_details (clientId,authorities,authorizedGrantTypes,clientSecret,redirecturi,"scope") +VALUES ('superset','TRUSTED_CLIENT','authorization_code','changeme','https://MyURL/oauth-authorized/openlmis','read,write'); ``` Don't forget to set newly created user's credentials in settings.env. 
Example: diff --git a/reporting/config/services/akhq/akhq.yml b/reporting/config/services/akhq/akhq.yml new file mode 100644 index 00000000..1feb80fc --- /dev/null +++ b/reporting/config/services/akhq/akhq.yml @@ -0,0 +1,27 @@ +akhq: + connections: + kafka: + properties: + bootstrap.servers: "kafka:29092" + server: + access-log: false + security: + default-group: reader + groups: + admin: + roles: + - topic/read + - topic/insert + - topic/delete + - group/read + - group/delete + - group/offsets + - node/read + - node/config/update + # Authentication (optional, but recommended) + authentication: + basic: + - username: admin + password: admin + groups: + - admin diff --git a/reporting/config/services/connect/sink-referencedata.json b/reporting/config/services/connect/sink-referencedata.json index 8c817cbd..8a547fbd 100644 --- a/reporting/config/services/connect/sink-referencedata.json +++ b/reporting/config/services/connect/sink-referencedata.json @@ -8,6 +8,7 @@ "connection.user": "postgres", "connection.password": "p@ssw0rd", "auto.create": "true", + "auto.evolve": "true", "insert.mode": "upsert", "pk.fields": "id", "pk.mode": "record_key", diff --git a/reporting/config/services/connect/sink-requisition.json b/reporting/config/services/connect/sink-requisition.json index 41a182cd..cc78bf1f 100644 --- a/reporting/config/services/connect/sink-requisition.json +++ b/reporting/config/services/connect/sink-requisition.json @@ -8,6 +8,7 @@ "connection.user": "postgres", "connection.password": "p@ssw0rd", "auto.create": "true", + "auto.evolve": "true", "insert.mode": "upsert", "pk.fields": "id", "pk.mode": "record_key", diff --git a/reporting/config/services/connect/sink-stockmanagement.json b/reporting/config/services/connect/sink-stockmanagement.json new file mode 100644 index 00000000..0c3e83d0 --- /dev/null +++ b/reporting/config/services/connect/sink-stockmanagement.json @@ -0,0 +1,22 @@ +{ + "name": "sink-stockmanagement", + "config": { + "connector.class": 
"io.confluent.connect.jdbc.JdbcSinkConnector", + "tasks.max": "1", + "topics": "public.kafka_nodes,public.kafka_stock_events,public.kafka_stock_event_line_items,public.kafka_stock_cards,public.kafka_stock_card_line_items,public.kafka_stock_card_line_item_reasons", + "connection.url": "jdbc:postgresql://reporting-db:5432/open_lmis_reporting?stringtype=unspecified", + "connection.user": "postgres", + "connection.password": "p@ssw0rd", + "auto.evolve": "true", + "auto.create": "true", + "insert.mode": "upsert", + "pk.fields": "id", + "pk.mode": "record_key", + "delete.enabled": "true", + "transforms": "TimestampConverter", + "transforms.TimestampConverter.type": "org.apache.kafka.connect.transforms.TimestampConverter$Value", + "transforms.TimestampConverter.target.type": "Timestamp", + "transforms.TimestampConverter.field": "processeddate" + } +} + \ No newline at end of file diff --git a/reporting/config/services/connect/source-stockmanagement.json b/reporting/config/services/connect/source-stockmanagement.json new file mode 100644 index 00000000..cc43bb74 --- /dev/null +++ b/reporting/config/services/connect/source-stockmanagement.json @@ -0,0 +1,29 @@ +{ + "name": "source-stockmanagement", + "config": { + "connector.class": "io.debezium.connector.postgresql.PostgresConnector", + "tasks.max": "1", + "plugin.name": "wal2json", + "database.hostname": "olmis-db", + "database.port": "5432", + "database.user": "postgres", + "database.password": "p@ssw0rd", + "database.dbname": "open_lmis", + "database.server.name": "openlmis", + "table.whitelist": "stockmanagement\\.nodes,stockmanagement\\.stock_events,stockmanagement\\.stock_event_line_items,stockmanagement\\.stock_cards,stockmanagement\\.stock_card_line_items,stockmanagement\\.stock_card_line_item_reasons", + "database.history.kafka.bootstrap.servers": "kafka:29092", + "database.history.kafka.topic": "openlmis", + "slot.name": "dbz_stockmanagement", + "time.precision.mode": "connect", + "heartbeat.interval.ms": 3000, + 
"heartbeat.action.query": "CREATE TABLE IF NOT EXISTS debezium_heartbeat (id SERIAL PRIMARY KEY, ts TIMESTAMPTZ); INSERT INTO debezium_heartbeat (id, ts) VALUES (1, NOW()) ON CONFLICT(id) DO UPDATE SET ts=NOW();", + "transforms": "unwrap,route", + "transforms.unwrap.type": "io.debezium.transforms.ExtractNewRecordState", + "transforms.unwrap.drop.tombstones": "false", + "transforms.unwrap.delete.handling.mode": "none", + "transforms.route.type": "org.apache.kafka.connect.transforms.RegexRouter", + "transforms.route.regex": "([^.]+)\\.([^.]+)\\.([^.]+)", + "transforms.route.replacement": "public.kafka_$3" + } + } + \ No newline at end of file diff --git a/reporting/config/services/nginx/init.sh b/reporting/config/services/nginx/init.sh index 04ae8825..46d19ae8 100755 --- a/reporting/config/services/nginx/init.sh +++ b/reporting/config/services/nginx/init.sh @@ -9,7 +9,14 @@ cp -r /config/nginx/consul-template/* /etc/consul-template/ echo -n "${NGINX_BASIC_AUTH_USER}:" >> /etc/nginx/.htpasswd openssl passwd -apr1 ${NGINX_BASIC_AUTH_PW} >> /etc/nginx/.htpasswd -echo "Waiting for consul to be available" -sleep 240; +# echo "Waiting for consul to be available" +# sleep 240; + +echo "Waiting for Consul at consul:8500 to be available…" +until wget -qO- http://consul:8500/v1/status/leader 2>/dev/null; do + printf "." + sleep 5 +done +printf "\nConsul is up—starting Nginx.\n" 
/home/run.sh \ No newline at end of file diff --git a/reporting/config/services/superset/dashboards/exported_dashboards.zip b/reporting/config/services/superset/dashboards/exported_dashboards.zip new file mode 100644 index 00000000..cf1d4f1f Binary files /dev/null and b/reporting/config/services/superset/dashboards/exported_dashboards.zip differ diff --git a/reporting/config/services/superset/dashboards/openlmis_uat_dashboards.zip b/reporting/config/services/superset/dashboards/openlmis_uat_dashboards.zip_backup similarity index 100% rename from reporting/config/services/superset/dashboards/openlmis_uat_dashboards.zip rename to reporting/config/services/superset/dashboards/openlmis_uat_dashboards.zip_backup diff --git a/reporting/config/services/superset/dashboards/openlmis_uat_dashboards_db_on_host.zip b/reporting/config/services/superset/dashboards/openlmis_uat_dashboards_db_on_host.zip new file mode 100644 index 00000000..5a803061 Binary files /dev/null and b/reporting/config/services/superset/dashboards/openlmis_uat_dashboards_db_on_host.zip differ diff --git a/reporting/config/services/superset/datasources/database.yaml b/reporting/config/services/superset/datasources/database.yaml index 81101f1c..be3d9226 100644 --- a/reporting/config/services/superset/datasources/database.yaml +++ b/reporting/config/services/superset/datasources/database.yaml @@ -2,7 +2,8 @@ databases: - database_name: main expose_in_sqllab: true extra: "{\r\n \"metadata_params\": {},\r\n \"engine_params\": {}\r\n}\r\n" - sqlalchemy_uri: postgresql+psycopg2://postgres:XXXXXXXXXX@db:5432/open_lmis_reporting + #sqlalchemy_uri: postgresql+psycopg2://postgres:XXXXXXXXXX@db:5432/open_lmis_reporting + sqlalchemy_uri: postgresql+psycopg2://postgres:XXXXXXXXXX@10.255.100.97:5433/open_lmis_reporting tables: - columns: - {avg: true, column_name: adjusted_consumption, sum: true, type: DOUBLE PRECISION} @@ -1039,3 +1040,18 @@ databases: - {expression: COUNT(*), metric_name: count, metric_type: count, 
verbose_name: COUNT(*)} schema: public table_name: kafka_commodity_types + - columns: + - {column_name: processeddate, is_dttm: true, type: TIMESTAMP WITH TIME ZONE} + - {column_name: facilityid, type: UUID} + - {column_name: id, type: UUID} + - {column_name: programid, type: UUID} + - {column_name: userid, type: UUID} + - {column_name: documentnumber, type: TEXT} + - {column_name: signature, type: TEXT} + - {column_name: isshowed, type: BOOLEAN} + - {column_name: isactive, type: BOOLEAN} + main_dttm_col: processeddate + metrics: + - {expression: COUNT(*), metric_name: count, metric_type: count, verbose_name: COUNT(*)} + schema: public + table_name: kafka_stock_events diff --git a/reporting/config/services/superset/datasources/exported_datasets.zip b/reporting/config/services/superset/datasources/exported_datasets.zip new file mode 100644 index 00000000..cc58bbde Binary files /dev/null and b/reporting/config/services/superset/datasources/exported_datasets.zip differ diff --git a/reporting/config/services/superset/init.sh b/reporting/config/services/superset/init.sh index 5097ec81..5f4358e4 100755 --- a/reporting/config/services/superset/init.sh +++ b/reporting/config/services/superset/init.sh @@ -16,7 +16,8 @@ cp -rf $CONFIG_DIR/app-customizations/$SUPERSET_VERSION/* $APP_DIR && $APP_DIR/superset-frontend/js_build.sh && # wait for postgres -until PGPASSWORD=$POSTGRES_PASSWORD psql -h "db" -p "5432" -U "$POSTGRES_USER" -d "open_lmis_reporting" -c '\q'; do +#until PGPASSWORD=$POSTGRES_PASSWORD psql -h "db" -p "5432" -U "$POSTGRES_USER" -d "superset" -c '\q'; do +until PGPASSWORD=$POSTGRES_PASSWORD psql -h "$POSTGRES_HOST" -p "$POSTGRES_PORT" -U "$POSTGRES_USER" -d "open_lmis_reporting" -c '\q'; do >&2 echo "Postgres is unavailable - sleeping" sleep 5 done @@ -28,7 +29,11 @@ flask fab create-admin --username ${SUPERSET_ADMIN_USERNAME} --firstname Admin - superset db upgrade && superset import_datasources -p $CONFIG_DIR/datasources/database.yaml && -superset 
import_dashboards -u ${SUPERSET_ADMIN_USERNAME} -p $CONFIG_DIR/dashboards/openlmis_uat_dashboards.zip && +#superset import-datasources -p $CONFIG_DIR/datasources/exported_datasets.zip && +#superset import_dashboards -u ${SUPERSET_ADMIN_USERNAME} -p $CONFIG_DIR/dashboards/openlmis_uat_dashboards.zip && +superset import_dashboards -u ${SUPERSET_ADMIN_USERNAME} -p $CONFIG_DIR/dashboards/openlmis_uat_dashboards_db_on_host.zip && +#superset import_dashboards -u ${SUPERSET_ADMIN_USERNAME} -p $CONFIG_DIR/dashboards/exported_dashboards.zip && +#superset import_dashboards -u ${SUPERSET_ADMIN_USERNAME} -p $CONFIG_DIR/dashboards/elmis_superset_dahboards_04042025_1502.zip && superset init && gunicorn $GUNICORN_CMD_ARGS "superset.app:create_app()" diff --git a/reporting/config/services/superset/superset_config.py b/reporting/config/services/superset/superset_config.py index da4bc78e..03799d3e 100644 --- a/reporting/config/services/superset/superset_config.py +++ b/reporting/config/services/superset/superset_config.py @@ -6,6 +6,8 @@ from flask_appbuilder.security.manager import AUTH_OAUTH from superset_patchup.oauth import CustomSecurityManager +import logging +logging.warning("Sanity check: Using custom superset_config.py") def stringToBase64(s): return base64.b64encode(s.encode('utf-8')).decode('utf-8') @@ -15,7 +17,12 @@ def lookup_password(url): return os.environ['POSTGRES_PASSWORD'] -SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{}:{}@db:5432/open_lmis_reporting'.format( +# SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{}:{}@db:5432/open_lmis_reporting'.format( +# os.environ['POSTGRES_USER'], +# os.environ['POSTGRES_PASSWORD']) + + +SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{}:{}@10.255.100.97:5433/open_lmis_reporting'.format( os.environ['POSTGRES_USER'], os.environ['POSTGRES_PASSWORD']) diff --git a/reporting/cron/periodic/15min/refresh-mv b/reporting/cron/periodic/15min/refresh-mv index 66605972..29494df1 100755 --- a/reporting/cron/periodic/15min/refresh-mv 
+++ b/reporting/cron/periodic/15min/refresh-mv @@ -32,10 +32,31 @@ export PGDATABASE=$DB export PGUSER=$POSTGRES_USER echo "Refreshing reporting stack materialized views..." + psql <<-EOSQL + \echo 'Refreshing view_facility_access...' REFRESH MATERIALIZED VIEW view_facility_access; + + \echo 'Refreshing reporting_rate_and_timeliness...' REFRESH MATERIALIZED VIEW reporting_rate_and_timeliness; + + \echo 'Refreshing adjustments...' REFRESH MATERIALIZED VIEW adjustments; + + \echo 'Refreshing stock_status_and_consumption...' REFRESH MATERIALIZED VIEW stock_status_and_consumption; + + \echo 'Refreshing facilities...' REFRESH MATERIALIZED VIEW facilities; + + \echo 'Refreshing expired_products...' + REFRESH MATERIALIZED VIEW expired_products; + + \echo 'Refreshing stock_card_summaries...' + REFRESH MATERIALIZED VIEW stock_card_summaries; + + \echo 'Refreshing stock_card_summaries_with_prices...' + REFRESH MATERIALIZED VIEW stock_card_summaries_with_prices; EOSQL + +echo "Materialized view refresh complete." diff --git a/reporting/db-on-host-init/reporting-db-on-host.sh b/reporting/db-on-host-init/reporting-db-on-host.sh new file mode 100755 index 00000000..9f73ea18 --- /dev/null +++ b/reporting/db-on-host-init/reporting-db-on-host.sh @@ -0,0 +1,30 @@ +#!/bin/bash +set -e + +# Reporting DB configuration +DB_HOST="${POSTGRES_HOST:-172.17.0.1}" +DB_PORT="${POSTGRES_PORT:-5433}" +DB_NAME="${POSTGRES_DB:-open_lmis_reporting}" +DB_USER="${POSTGRES_USER:-postgres}" +DB_PASS="${POSTGRES_PASSWORD:-postgres}" + +export PGPASSWORD="$DB_PASS" + +echo "🔄 Connecting to PostgreSQL at $DB_HOST:$DB_PORT as user $DB_USER" + +# Check if the reporting DB already exists +if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -lqt | cut -d \| -f 1 | grep -qw "$DB_NAME"; then + echo "✅ Database '$DB_NAME' already exists, skipping creation." +else + echo "📦 Creating database '$DB_NAME'..." 
+ psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" <<-EOSQL + CREATE DATABASE $DB_NAME; + GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER; +EOSQL +fi + +# Run schema setup +echo "📂 Applying schema from OlmisCreateTableStatements.sql..." +psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" < /db-on-host-init/templates/OlmisCreateTableStatements.sql + +echo "✅ Reporting DB initialized successfully" diff --git a/reporting/db-on-host-init/superset-db-on-host.sh b/reporting/db-on-host-init/superset-db-on-host.sh new file mode 100755 index 00000000..e7209ac3 --- /dev/null +++ b/reporting/db-on-host-init/superset-db-on-host.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -e + +# Superset DB and user configuration +DB_HOST="${POSTGRES_HOST:-172.17.0.1}" +DB_PORT="${POSTGRES_PORT:-5433}" +DB_ADMIN_USER="${POSTGRES_USER:-postgres}" +DB_ADMIN_PASS="${POSTGRES_PASSWORD:-postgres}" + +SUPERSET_DB="${SUPERSET_POSTGRES_USER:-superset}" +SUPERSET_USER="${SUPERSET_POSTGRES_USER:-superset}" +SUPERSET_PASS="${SUPERSET_POSTGRES_PASSWORD:-superset_pass}" + +export PGPASSWORD="$DB_ADMIN_PASS" + +echo "🔍 Connecting to $DB_HOST:$DB_PORT as $DB_ADMIN_USER to create Superset DB/user..." + +# Check if user exists +if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_ADMIN_USER" -tAc "SELECT 1 FROM pg_roles WHERE rolname='$SUPERSET_USER'" | grep -q 1; then + echo "✅ Superset user '$SUPERSET_USER' already exists, skipping creation." +else + echo "👤 Creating user '$SUPERSET_USER'..." + psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_ADMIN_USER" <<-EOSQL + CREATE USER $SUPERSET_USER WITH PASSWORD '$SUPERSET_PASS'; +EOSQL +fi + +# Check if DB exists +if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_ADMIN_USER" -lqt | cut -d \| -f 1 | grep -qw "$SUPERSET_DB"; then + echo "✅ Superset database '$SUPERSET_DB' already exists, skipping creation." +else + echo "📦 Creating Superset database '$SUPERSET_DB'..." 
+ psql -v ON_ERROR_STOP=1 -h "$DB_HOST" -p "$DB_PORT" -U "$DB_ADMIN_USER" <<-EOSQL + CREATE DATABASE $SUPERSET_DB; + GRANT ALL PRIVILEGES ON DATABASE $SUPERSET_DB TO $SUPERSET_USER; +EOSQL +fi + +echo "✅ Superset DB and user setup complete" diff --git a/reporting/db-on-host-init/templates/OlmisCreateTableStatements.sql b/reporting/db-on-host-init/templates/OlmisCreateTableStatements.sql new file mode 100644 index 00000000..d042551f --- /dev/null +++ b/reporting/db-on-host-init/templates/OlmisCreateTableStatements.sql @@ -0,0 +1,2622 @@ +-- Olmis create table statements +-- Created by Craig Appl (cappl@ona.io) +-- Modified by A. Maritim (amaritim@ona.io) and J. Wambere (jwambere@ona.io) +-- Further modified by C. Ahn (chongsun.ahn@villagereach.org) +-- Further modified by Lesotho eLMIS team in April 2025 +-- Last Updated 19 May 2020 +-- + +--- On error (e.g. Table already exists) continue with the next statement +\set ON_ERROR_STOP off + +-- +-- Name: postgis; Type: EXTENSION; Schema: -; Owner: +-- + +CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public; + + +-- +-- Name: EXTENSION postgis; Type: COMMENT; Schema: -; Owner: +-- + +COMMENT ON EXTENSION postgis IS 'PostGIS geometry, geography, and raster spatial types and functions'; + +-- +-- Name: commodity_types; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_commodity_types ( + id uuid NOT NULL, + name character varying(255) NOT NULL, + classificationsystem character varying(255) NOT NULL, + classificationid character varying(255) NOT NULL, + parentid uuid +); + + +ALTER TABLE public.kafka_commodity_types OWNER TO postgres; + +-- +-- Name: dispensable_attributes; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_dispensable_attributes ( + dispensableid uuid NOT NULL, + key text NOT NULL, + value text NOT NULL +); + + +ALTER TABLE public.kafka_dispensable_attributes OWNER TO postgres; + +-- +-- Name: dispensables; Type: TABLE; Schema: public; Owner: 
postgres +-- + +CREATE TABLE public.kafka_dispensables ( + id uuid NOT NULL, + type text DEFAULT 'default'::text NOT NULL +); + + +ALTER TABLE public.kafka_dispensables OWNER TO postgres; + +-- +-- Name: facilities; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_facilities ( + id uuid NOT NULL, + active boolean NOT NULL, + code text NOT NULL, + comment text, + description text, + enabled boolean NOT NULL, + godowndate date, + golivedate date, + name text, + openlmisaccessible boolean, + geographiczoneid uuid NOT NULL, + operatedbyid uuid, + typeid uuid NOT NULL, + extradata jsonb, + location geometry +); + + +ALTER TABLE public.kafka_facilities OWNER TO postgres; + +-- +-- Name: facility_operators; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_facility_operators ( + id uuid NOT NULL, + code text NOT NULL, + description text, + displayorder integer, + name text +); + + +ALTER TABLE public.kafka_facility_operators OWNER TO postgres; + +-- +-- Name: facility_type_approved_products; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_facility_type_approved_products ( + id uuid NOT NULL, + versionnumber bigint NOT NULL, + orderableid uuid NOT NULL, + programid uuid NOT NULL, + facilitytypeid uuid NOT NULL, + maxperiodsofstock double precision NOT NULL, + minperiodsofstock double precision, + emergencyorderpoint double precision, + active boolean DEFAULT true NOT NULL, + lastupdated timestamp with time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.kafka_facility_type_approved_products OWNER TO postgres; + +-- +-- Name: facility_types; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_facility_types ( + id uuid NOT NULL, + active boolean, + code text NOT NULL, + description text, + displayorder integer, + name text +); + + +ALTER TABLE public.kafka_facility_types OWNER TO postgres; + +-- +-- Name: geographic_levels; Type: TABLE; Schema: public; 
Owner: postgres +-- + +CREATE TABLE public.kafka_geographic_levels ( + id uuid NOT NULL, + code text NOT NULL, + levelnumber integer NOT NULL, + name text +); + + +ALTER TABLE public.kafka_geographic_levels OWNER TO postgres; + +-- +-- Name: geographic_zones; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_geographic_zones ( + id uuid NOT NULL, + catchmentpopulation integer, + code text NOT NULL, + latitude numeric(8,5), + longitude numeric(8,5), + name text, + levelid uuid NOT NULL, + parentid uuid, + boundary geometry, + extradata jsonb +); + + +ALTER TABLE public.kafka_geographic_zones OWNER TO postgres; + +-- +-- Name: ideal_stock_amounts; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_ideal_stock_amounts ( + id uuid NOT NULL, + facilityid uuid NOT NULL, + processingperiodid uuid NOT NULL, + amount integer, + commoditytypeid uuid NOT NULL +); + + +ALTER TABLE public.kafka_ideal_stock_amounts OWNER TO postgres; + +-- +-- Name: lots; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_lots ( + id uuid NOT NULL, + lotcode text NOT NULL, + expirationdate date, + manufacturedate date, + tradeitemid uuid NOT NULL, + active boolean NOT NULL +); + + +ALTER TABLE public.kafka_lots OWNER TO postgres; + +-- +-- Name: orderable_children; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_orderable_children ( + id uuid NOT NULL, + parentid uuid NOT NULL, + parentversionnumber bigint NOT NULL, + orderableid uuid NOT NULL, + orderableversionnumber bigint NOT NULL, + quantity bigint NOT NULL +); + + +ALTER TABLE public.kafka_orderable_children OWNER TO postgres; + +-- +-- Name: orderable_display_categories; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_orderable_display_categories ( + id uuid NOT NULL, + code character varying(255), + displayname character varying(255), + displayorder integer NOT NULL +); + + +ALTER TABLE 
public.kafka_orderable_display_categories OWNER TO postgres; + +-- +-- Name: orderable_identifiers; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_orderable_identifiers ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL, + orderableid uuid NOT NULL, + orderableversionnumber bigint NOT NULL +); + + +ALTER TABLE public.kafka_orderable_identifiers OWNER TO postgres; + +-- +-- Name: orderables; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_orderables ( + id uuid NOT NULL, + fullproductname character varying(255), + packroundingthreshold bigint NOT NULL, + netcontent bigint NOT NULL, + code character varying(255), + roundtozero boolean NOT NULL, + description character varying(255), + extradata jsonb, + dispensableid uuid NOT NULL, + versionnumber bigint NOT NULL, + lastupdated timestamp with time zone DEFAULT now() NOT NULL, + minimumtemperaturevalue double precision, + minimumtemperaturecode character varying(30), + maximumtemperaturevalue double precision, + maximumtemperaturecode character varying(30), + inboxcubedimensionvalue double precision, + inboxcubedimensioncode character varying(30) +); + + +ALTER TABLE public.kafka_orderables OWNER TO postgres; + +-- +-- Name: processing_periods; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_processing_periods ( + id uuid NOT NULL, + description text, + enddate date NOT NULL, + name text NOT NULL, + startdate date NOT NULL, + processingscheduleid uuid NOT NULL, + extradata jsonb +); + + +ALTER TABLE public.kafka_processing_periods OWNER TO postgres; + +-- +-- Name: processing_schedules; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_processing_schedules ( + id uuid NOT NULL, + code text NOT NULL, + description text, + modifieddate timestamp with time zone, + name text NOT NULL +); + + +ALTER TABLE public.kafka_processing_schedules OWNER TO postgres; + +-- +-- Name: 
program_orderables; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_program_orderables ( + id uuid NOT NULL, + active boolean NOT NULL, + displayorder integer NOT NULL, + dosesperpatient integer, + fullsupply boolean NOT NULL, + priceperpack numeric(19,2), + orderabledisplaycategoryid uuid NOT NULL, + orderableid uuid NOT NULL, + programid uuid NOT NULL, + orderableversionnumber bigint NOT NULL +); + + +ALTER TABLE public.kafka_program_orderables OWNER TO postgres; + +-- +-- Name: programs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_programs ( + id uuid NOT NULL, + active boolean, + code character varying(255), + description text, + name text, + periodsskippable boolean NOT NULL, + shownonfullsupplytab boolean, + enabledatephysicalstockcountcompleted boolean NOT NULL, + skipauthorization boolean DEFAULT false +); + + +ALTER TABLE public.kafka_programs OWNER TO postgres; + +-- +-- Name: requisition_group_members; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_group_members ( + requisitiongroupid uuid NOT NULL, + facilityid uuid NOT NULL +); + + +ALTER TABLE public.kafka_requisition_group_members OWNER TO postgres; + +-- +-- Name: requisition_group_program_schedules; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_group_program_schedules ( + id uuid NOT NULL, + directdelivery boolean NOT NULL, + dropofffacilityid uuid, + processingscheduleid uuid NOT NULL, + programid uuid NOT NULL, + requisitiongroupid uuid NOT NULL +); + + +ALTER TABLE public.kafka_requisition_group_program_schedules OWNER TO postgres; + +-- +-- Name: requisition_groups; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_groups ( + id uuid NOT NULL, + code text NOT NULL, + description text, + name text NOT NULL, + supervisorynodeid uuid NOT NULL +); + + +ALTER TABLE public.kafka_requisition_groups OWNER TO 
postgres; + +-- +-- Name: right_assignments; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_right_assignments ( + id uuid NOT NULL, + rightname text NOT NULL, + facilityid uuid, + programid uuid, + userid uuid NOT NULL +); + + +ALTER TABLE public.kafka_right_assignments OWNER TO postgres; + +-- +-- Name: right_attachments; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_right_attachments ( + rightid uuid NOT NULL, + attachmentid uuid NOT NULL +); + + +ALTER TABLE public.kafka_right_attachments OWNER TO postgres; + +-- +-- Name: rights; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_rights ( + id uuid NOT NULL, + description text, + name text NOT NULL, + type text NOT NULL +); + + +ALTER TABLE public.kafka_rights OWNER TO postgres; + +-- +-- Name: role_assignments; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_role_assignments ( + type character varying(31) NOT NULL, + id uuid NOT NULL, + roleid uuid, + userid uuid, + warehouseid uuid, + programid uuid, + supervisorynodeid uuid +); + + +ALTER TABLE public.kafka_role_assignments OWNER TO postgres; + +-- +-- Name: role_rights; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_role_rights ( + roleid uuid NOT NULL, + rightid uuid NOT NULL +); + + +ALTER TABLE public.kafka_role_rights OWNER TO postgres; + +-- +-- Name: roles; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_roles ( + id uuid NOT NULL, + description text, + name text NOT NULL +); + + +ALTER TABLE public.kafka_roles OWNER TO postgres; + +-- +-- Name: service_accounts; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_service_accounts ( + id uuid NOT NULL, + createdby uuid NOT NULL, + createddate timestamp with time zone NOT NULL +); + + +ALTER TABLE public.kafka_service_accounts OWNER TO postgres; + +-- +-- Name: supervisory_nodes; Type: TABLE; 
Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supervisory_nodes ( + id uuid NOT NULL, + code text NOT NULL, + description text, + name text NOT NULL, + facilityid uuid, + parentid uuid, + extradata jsonb, + partnerid uuid +); + + +ALTER TABLE public.kafka_supervisory_nodes OWNER TO postgres; + +-- +-- Name: supply_lines; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supply_lines ( + id uuid NOT NULL, + description text, + programid uuid NOT NULL, + supervisorynodeid uuid NOT NULL, + supplyingfacilityid uuid NOT NULL +); + + +ALTER TABLE public.kafka_supply_lines OWNER TO postgres; + +-- +-- Name: supply_partner_association_facilities; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supply_partner_association_facilities ( + supplypartnerassociationid uuid NOT NULL, + facilityid uuid NOT NULL +); + + +ALTER TABLE public.kafka_supply_partner_association_facilities OWNER TO postgres; + +-- +-- Name: supply_partner_association_orderables; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supply_partner_association_orderables ( + supplypartnerassociationid uuid NOT NULL, + orderableid uuid NOT NULL, + orderableversionnumber bigint NOT NULL +); + + +ALTER TABLE public.kafka_supply_partner_association_orderables OWNER TO postgres; + +-- +-- Name: supply_partner_associations; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supply_partner_associations ( + id uuid NOT NULL, + programid uuid NOT NULL, + supervisorynodeid uuid NOT NULL, + supplypartnerid uuid NOT NULL +); + + +ALTER TABLE public.kafka_supply_partner_associations OWNER TO postgres; + +-- +-- Name: supply_partners; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supply_partners ( + id uuid NOT NULL, + name text NOT NULL, + code text NOT NULL +); + + +ALTER TABLE public.kafka_supply_partners OWNER TO postgres; + +-- +-- Name: supported_programs; 
Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_supported_programs ( + active boolean NOT NULL, + startdate date, + facilityid uuid NOT NULL, + programid uuid NOT NULL, + locallyfulfilled boolean DEFAULT false NOT NULL +); + + +ALTER TABLE public.kafka_supported_programs OWNER TO postgres; + +-- +-- Name: system_notifications; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_system_notifications ( + id uuid NOT NULL, + title character varying(255), + message text NOT NULL, + startdate timestamp with time zone, + createddate timestamp with time zone NOT NULL, + expirydate timestamp with time zone, + active boolean DEFAULT true NOT NULL, + authorid uuid NOT NULL +); + + +ALTER TABLE public.kafka_system_notifications OWNER TO postgres; + +-- +-- Name: trade_item_classifications; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_trade_item_classifications ( + id uuid NOT NULL, + classificationsystem character varying(255) NOT NULL, + classificationid character varying(255) NOT NULL, + tradeitemid uuid NOT NULL +); + + +ALTER TABLE public.kafka_trade_item_classifications OWNER TO postgres; + +-- +-- Name: trade_items; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_trade_items ( + id uuid NOT NULL, + manufactureroftradeitem character varying(255) NOT NULL, + gtin text +); + + +ALTER TABLE public.kafka_trade_items OWNER TO postgres; + +-- +-- Name: users; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_users ( + id uuid NOT NULL, + active boolean DEFAULT false NOT NULL, + allownotify boolean DEFAULT true, + email character varying(255), + extradata jsonb, + firstname text NOT NULL, + lastname text NOT NULL, + timezone character varying(255), + username text NOT NULL, + verified boolean DEFAULT false NOT NULL, + homefacilityid uuid, + jobtitle character varying(255), + phonenumber character varying(255) +); + + +ALTER TABLE 
public.kafka_users OWNER TO postgres; + +-- Kafka Stock Cards Table +CREATE TABLE public.kafka_stock_cards ( + id uuid NOT NULL, + facilityid uuid NOT NULL, + lotid uuid, + orderableid uuid NOT NULL, + programid uuid NOT NULL, + origineventid uuid NOT NULL, + isshowed boolean DEFAULT true, + isactive boolean DEFAULT true +); + +-- Kafka Stock Card Line Items Table +CREATE TABLE public.kafka_stock_card_line_items ( + id uuid NOT NULL, + stockcardid uuid NOT NULL, + quantity integer NOT NULL, + reasonid uuid, + occurreddate date NOT NULL, + processeddate timestamp without time zone NOT NULL, + destinationfreetext character varying(255), + documentnumber character varying(255), + reasonfreetext character varying(255), + signature character varying(255), + sourcefreetext character varying(255), + userid uuid NOT NULL, + destinationid uuid, + origineventid uuid NOT NULL, + sourceid uuid, + cartonnumber character varying(255), + invoicenumber character varying(255), + referencenumber character varying(255), + unitprice double precision, + extradata jsonb +); + + +-- Kafka Stock Card Line Item Reasons Table +CREATE TABLE public.kafka_stock_card_line_item_reasons ( + id uuid NOT NULL, + name text NOT NULL, + description text, + isfreetextallowed boolean NOT NULL, + reasoncategory text NOT NULL, + reasontype text NOT NULL +); + +-- Ownership +ALTER TABLE public.kafka_stock_cards OWNER TO postgres; +ALTER TABLE public.kafka_stock_card_line_items OWNER TO postgres; +ALTER TABLE public.kafka_stock_card_line_item_reasons OWNER TO postgres; + +ALTER TABLE ONLY public.kafka_stock_cards + ADD CONSTRAINT kafka_stock_cards_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.kafka_stock_card_line_items + ADD CONSTRAINT kafka_stock_card_line_items_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.kafka_stock_card_line_item_reasons + ADD CONSTRAINT kafka_stock_card_line_item_reasons_pkey PRIMARY KEY (id); + +-- ========================================== +-- Indexes for Kafka Stock Cards 
Table +-- ========================================== + +-- Index to speed up lookups by facility ID +CREATE INDEX kafka_stock_cards_facilityid_idx +ON public.kafka_stock_cards USING btree (facilityid); + +-- Index to optimize queries by orderable ID +CREATE INDEX kafka_stock_cards_orderableid_idx +ON public.kafka_stock_cards USING btree (orderableid); + +-- Index to improve performance for program-based searches +CREATE INDEX kafka_stock_cards_programid_idx +ON public.kafka_stock_cards USING btree (programid); + +-- Index to quickly retrieve records based on lot ID +CREATE INDEX kafka_stock_cards_lotid_idx +ON public.kafka_stock_cards USING btree (lotid); + +-- Index to enhance performance when querying by origin event ID +CREATE INDEX kafka_stock_cards_origineventid_idx +ON public.kafka_stock_cards USING btree (origineventid); + +-- ========================================== +-- Indexes for Kafka Stock Card Line Items Table +-- ========================================== + +-- Index to speed up lookups by stock card ID +CREATE INDEX kafka_stock_card_line_items_stockcardid_idx +ON public.kafka_stock_card_line_items USING btree (stockcardid); + +-- Index to improve performance when searching by reason ID +CREATE INDEX kafka_stock_card_line_items_reasonid_idx +ON public.kafka_stock_card_line_items USING btree (reasonid); + +-- Index to optimize user-based searches +CREATE INDEX kafka_stock_card_line_items_userid_idx +ON public.kafka_stock_card_line_items USING btree (userid); + +-- Index to enhance performance for origin event ID searches +CREATE INDEX kafka_stock_card_line_items_origineventid_idx +ON public.kafka_stock_card_line_items USING btree (origineventid); + +-- ========================================== +-- Indexes for Kafka Stock Card Line Item Reasons Table +-- ========================================== + +-- Index to speed up lookups by reason name +CREATE INDEX kafka_stock_card_line_item_reasons_name_idx +ON public.kafka_stock_card_line_item_reasons USING 
btree (name); + + +-- +-- Name: commodity_types commodity_types_classificationsystem_classificationid_key; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_commodity_types + ADD CONSTRAINT commodity_types_classificationsystem_classificationid_key UNIQUE (classificationsystem, classificationid); + + +-- +-- Name: commodity_types commodity_types_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_commodity_types + ADD CONSTRAINT commodity_types_pkey PRIMARY KEY (id); + + +-- +-- Name: dispensable_attributes dispensable_attributes_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_dispensable_attributes + ADD CONSTRAINT dispensable_attributes_pkey PRIMARY KEY (dispensableid, key); + + +-- +-- Name: dispensables dispensables_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_dispensables + ADD CONSTRAINT dispensables_pkey PRIMARY KEY (id); + + +-- +-- Name: facilities facilities_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_facilities + ADD CONSTRAINT facilities_pkey PRIMARY KEY (id); + + +-- +-- Name: facility_operators facility_operators_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_facility_operators + ADD CONSTRAINT facility_operators_pkey PRIMARY KEY (id); + + +-- +-- Name: facility_type_approved_products facility_type_approved_products_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_facility_type_approved_products + ADD CONSTRAINT facility_type_approved_products_pkey PRIMARY KEY (id, versionnumber); + + +-- +-- Name: facility_types facility_types_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_facility_types + ADD CONSTRAINT facility_types_pkey PRIMARY KEY (id); + + +-- +-- Name: geographic_levels geographic_levels_pkey; 
Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_geographic_levels + ADD CONSTRAINT geographic_levels_pkey PRIMARY KEY (id); + + +-- +-- Name: geographic_zones geographic_zones_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_geographic_zones + ADD CONSTRAINT geographic_zones_pkey PRIMARY KEY (id); + + +-- +-- Name: ideal_stock_amounts ideal_stock_amounts_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_ideal_stock_amounts + ADD CONSTRAINT ideal_stock_amounts_pkey PRIMARY KEY (id); + + +-- +-- Name: lots lots_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_lots + ADD CONSTRAINT lots_pkey PRIMARY KEY (id); + + +-- +-- Name: orderable_children orderable_children_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_orderable_children + ADD CONSTRAINT orderable_children_pkey PRIMARY KEY (id); + + +-- +-- Name: orderable_display_categories orderable_display_categories_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_orderable_display_categories + ADD CONSTRAINT orderable_display_categories_pkey PRIMARY KEY (id); + + +-- +-- Name: orderables orderables_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_orderables + ADD CONSTRAINT orderables_pkey PRIMARY KEY (id, versionnumber); + + +-- +-- Name: right_assignments permission_strings_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_right_assignments + ADD CONSTRAINT permission_strings_pkey PRIMARY KEY (id); + + +-- +-- Name: processing_periods processing_periods_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_processing_periods + ADD CONSTRAINT processing_periods_pkey PRIMARY KEY (id); + + +-- +-- Name: processing_schedules 
processing_schedules_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_processing_schedules + ADD CONSTRAINT processing_schedules_pkey PRIMARY KEY (id); + + +-- +-- Name: program_orderables program_orderables_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_program_orderables + ADD CONSTRAINT program_orderables_pkey PRIMARY KEY (id); + + +-- +-- Name: programs programs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_programs + ADD CONSTRAINT programs_pkey PRIMARY KEY (id); + + +-- +-- Name: requisition_group_members requisition_group_members_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_group_members + ADD CONSTRAINT requisition_group_members_pkey PRIMARY KEY (requisitiongroupid, facilityid); + + +-- +-- Name: requisition_group_program_schedules requisition_group_program_schedule_unique_program_requisitiongr; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_group_program_schedules + ADD CONSTRAINT requisition_group_program_schedule_unique_program_requisitiongr UNIQUE (requisitiongroupid, programid) DEFERRABLE INITIALLY DEFERRED; + + +-- +-- Name: requisition_group_program_schedules requisition_group_program_schedules_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_group_program_schedules + ADD CONSTRAINT requisition_group_program_schedules_pkey PRIMARY KEY (id); + + +-- +-- Name: requisition_groups requisition_groups_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_groups + ADD CONSTRAINT requisition_groups_pkey PRIMARY KEY (id); + + +-- +-- Name: right_assignments right_assignment_unq; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_right_assignments + ADD CONSTRAINT 
right_assignment_unq UNIQUE (rightname, facilityid, programid, userid); + + +-- +-- Name: right_attachments right_attachments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_right_attachments + ADD CONSTRAINT right_attachments_pkey PRIMARY KEY (rightid, attachmentid); + + +-- +-- Name: rights rights_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_rights + ADD CONSTRAINT rights_pkey PRIMARY KEY (id); + + +-- +-- Name: role_assignments role_assignments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_role_assignments + ADD CONSTRAINT role_assignments_pkey PRIMARY KEY (id); + + +-- +-- Name: role_rights role_rights_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_role_rights + ADD CONSTRAINT role_rights_pkey PRIMARY KEY (roleid, rightid); + + +-- +-- Name: roles roles_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_roles + ADD CONSTRAINT roles_pkey PRIMARY KEY (id); + + +-- +-- Name: service_accounts service_accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_service_accounts + ADD CONSTRAINT service_accounts_pkey PRIMARY KEY (id); + + +-- +-- Name: supervisory_nodes supervisory_nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supervisory_nodes + ADD CONSTRAINT supervisory_nodes_pkey PRIMARY KEY (id); + + +-- +-- Name: supply_lines supply_line_unique_program_supervisory_node; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supply_lines + ADD CONSTRAINT supply_line_unique_program_supervisory_node UNIQUE (supervisorynodeid, programid); + + +-- +-- Name: supply_lines supply_lines_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supply_lines + ADD CONSTRAINT 
supply_lines_pkey PRIMARY KEY (id); + + +-- +-- Name: supply_partner_association_facilities supply_partner_association_facilities_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supply_partner_association_facilities + ADD CONSTRAINT supply_partner_association_facilities_pkey PRIMARY KEY (supplypartnerassociationid, facilityid); + + +-- +-- Name: supply_partner_association_orderables supply_partner_association_orderables_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supply_partner_association_orderables + ADD CONSTRAINT supply_partner_association_orderables_pkey PRIMARY KEY (supplypartnerassociationid, orderableid, orderableversionnumber); + + +-- +-- Name: supply_partner_associations supply_partner_associations_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supply_partner_associations + ADD CONSTRAINT supply_partner_associations_pkey PRIMARY KEY (id); + + +-- +-- Name: supply_partners supply_partners_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supply_partners + ADD CONSTRAINT supply_partners_pkey PRIMARY KEY (id); + + +-- +-- Name: supported_programs supported_programs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supported_programs + ADD CONSTRAINT supported_programs_pkey PRIMARY KEY (facilityid, programid); + + +-- +-- Name: system_notifications system_notifications_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_system_notifications + ADD CONSTRAINT system_notifications_pkey PRIMARY KEY (id); + + +-- +-- Name: trade_items trade_items_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_trade_items + ADD CONSTRAINT trade_items_pkey PRIMARY KEY (id); + + +-- +-- Name: rights uk_4f64k9vkx833wfpw8n25x2602; Type: CONSTRAINT; Schema: public; Owner: 
postgres +-- + +ALTER TABLE ONLY public.kafka_rights + ADD CONSTRAINT uk_4f64k9vkx833wfpw8n25x2602 UNIQUE (name); + + +-- +-- Name: users uk_6dotkott2kjsp8vw4d0m25fb7; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_users + ADD CONSTRAINT uk_6dotkott2kjsp8vw4d0m25fb7 UNIQUE (email); + + +-- +-- Name: supervisory_nodes uk_9vforn7hxhuinr8bmu0vkad3v; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_supervisory_nodes + ADD CONSTRAINT uk_9vforn7hxhuinr8bmu0vkad3v UNIQUE (code); + + +-- +-- Name: geographic_levels uk_by9o3bl6rafeuane589514s2v; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_geographic_levels + ADD CONSTRAINT uk_by9o3bl6rafeuane589514s2v UNIQUE (code); + + +-- +-- Name: facility_operators uk_g7ooo22v3vokh2qrqbxw7uaps; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_facility_operators + ADD CONSTRAINT uk_g7ooo22v3vokh2qrqbxw7uaps UNIQUE (code); + + +-- +-- Name: geographic_zones uk_jpns3ahywgm4k52rdfm08m9k0; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_geographic_zones + ADD CONSTRAINT uk_jpns3ahywgm4k52rdfm08m9k0 UNIQUE (code); + + +-- +-- Name: requisition_groups uk_nrqjt84p9wmrm1qmr7nokj8sg; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_groups + ADD CONSTRAINT uk_nrqjt84p9wmrm1qmr7nokj8sg UNIQUE (code); + + +-- +-- Name: trade_items uk_tradeitems_gtin; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_trade_items + ADD CONSTRAINT uk_tradeitems_gtin UNIQUE (gtin); + + +-- +-- Name: lots unq_lotcode_tradeitemid; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_lots + ADD CONSTRAINT unq_lotcode_tradeitemid UNIQUE (lotcode, tradeitemid); + + +-- +-- Name: orderable_children unq_orderable_parent_id; Type: CONSTRAINT; Schema: public; Owner: 
postgres +-- + +ALTER TABLE ONLY public.kafka_orderable_children + ADD CONSTRAINT unq_orderable_parent_id UNIQUE (orderableid, orderableversionnumber, parentid, parentversionnumber); + + +-- +-- Name: orderable_identifiers unq_orderableid_orderableversionid_key; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_orderable_identifiers + ADD CONSTRAINT unq_orderableid_orderableversionid_key UNIQUE (orderableid, orderableversionnumber, key); + + +-- +-- Name: orderables unq_productcode_versionid; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_orderables + ADD CONSTRAINT unq_productcode_versionid UNIQUE (code, versionnumber); + + +-- +-- Name: programs unq_program_code; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_programs + ADD CONSTRAINT unq_program_code UNIQUE (code); + + +-- +-- Name: trade_item_classifications unq_trade_item_classifications_system; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_trade_item_classifications + ADD CONSTRAINT unq_trade_item_classifications_system UNIQUE (tradeitemid, classificationsystem); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: facilities_geographiczoneid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facilities_geographiczoneid_idx ON public.kafka_facilities USING btree (geographiczoneid); + + +-- +-- Name: facilities_location_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facilities_location_idx ON public.kafka_facilities USING gist (location); + + +-- +-- Name: facilities_operatedbyid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facilities_operatedbyid_idx ON public.kafka_facilities USING btree (operatedbyid); + + +-- +-- Name: facilities_typeid_idx; Type: 
INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facilities_typeid_idx ON public.kafka_facilities USING btree (typeid); + + +-- +-- Name: facility_type_approved_products_facilitytypeid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facility_type_approved_products_facilitytypeid_idx ON public.kafka_facility_type_approved_products USING btree (facilitytypeid); + + +-- +-- Name: facility_type_approved_products_orderableid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facility_type_approved_products_orderableid_idx ON public.kafka_facility_type_approved_products USING btree (orderableid); + + +-- +-- Name: facility_type_approved_products_programid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX facility_type_approved_products_programid_idx ON public.kafka_facility_type_approved_products USING btree (programid); + + +-- +-- Name: ideal_stock_amounts_commoditytypeid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX ideal_stock_amounts_commoditytypeid_idx ON public.kafka_ideal_stock_amounts USING btree (commoditytypeid); + + +-- +-- Name: ideal_stock_amounts_facilityid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX ideal_stock_amounts_facilityid_idx ON public.kafka_ideal_stock_amounts USING btree (facilityid); + + +-- +-- Name: ideal_stock_amounts_processingperiodid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX ideal_stock_amounts_processingperiodid_idx ON public.kafka_ideal_stock_amounts USING btree (processingperiodid); + + +-- +-- Name: idx_orderable_children_orderable; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_orderable_children_orderable ON public.kafka_orderable_children USING btree (orderableid, orderableversionnumber); + + +-- +-- Name: idx_orderable_children_parent; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_orderable_children_parent ON public.kafka_orderable_children 
USING btree (parentid, parentversionnumber); + + +-- +-- Name: orderables_fullproductname_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX orderables_fullproductname_idx ON public.kafka_orderables USING btree (fullproductname); + + +-- +-- Name: processing_schedule_code_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX processing_schedule_code_unique_idx ON public.kafka_processing_schedules USING btree (lower(code)); + + +-- +-- Name: processing_schedule_name_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX processing_schedule_name_unique_idx ON public.kafka_processing_schedules USING btree (lower(name)); + + +-- +-- Name: program_orderables_orderabledisplaycategoryid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX program_orderables_orderabledisplaycategoryid_idx ON public.kafka_program_orderables USING btree (orderabledisplaycategoryid); + + +-- +-- Name: program_orderables_orderableid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX program_orderables_orderableid_idx ON public.kafka_program_orderables USING btree (orderableid); + + +-- +-- Name: program_orderables_orderableid_idx1; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX program_orderables_orderableid_idx1 ON public.kafka_program_orderables USING btree (orderableid); + + +-- +-- Name: program_orderables_programid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX program_orderables_programid_idx ON public.kafka_program_orderables USING btree (programid); + + +-- +-- Name: right_assignments_programid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX right_assignments_programid_idx ON public.kafka_right_assignments USING btree (programid); + + +-- +-- Name: right_assignments_userid_rightname_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX right_assignments_userid_rightname_idx ON 
public.kafka_right_assignments USING btree (userid, rightname); + + +-- +-- Name: role_assignments_userid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX role_assignments_userid_idx ON public.kafka_role_assignments USING btree (userid); + + +-- +-- Name: supervisory_nodes_parentid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX supervisory_nodes_parentid_idx ON public.kafka_supervisory_nodes USING btree (parentid); + + +-- +-- Name: supported_programs_facilityid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX supported_programs_facilityid_idx ON public.kafka_supported_programs USING btree (facilityid); + + +-- +-- Name: supported_programs_programid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX supported_programs_programid_idx ON public.kafka_supported_programs USING btree (programid); + + +-- +-- Name: system_notifications_active_authorid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX system_notifications_active_authorid_idx ON public.kafka_system_notifications USING btree (active, authorid); + + +-- +-- Name: unq_case_insensetive_supervisory_node_name; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_case_insensetive_supervisory_node_name ON public.kafka_supervisory_nodes USING btree (lower(name)); + + +-- +-- Name: unq_facility_code; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_facility_code ON public.kafka_facilities USING btree (lower(code)); + + +-- +-- Name: unq_facility_type_code; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_facility_type_code ON public.kafka_facility_types USING btree (lower(code)); + + +-- +-- Name: unq_ftap; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_ftap ON public.kafka_facility_type_approved_products USING btree (facilitytypeid, orderableid, programid) WHERE (active IS TRUE); + + +-- +-- Name: 
unq_programid_orderableid_orderableversionnumber; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_programid_orderableid_orderableversionnumber ON public.kafka_program_orderables USING btree (programid, orderableid, orderableversionnumber) WHERE (active = true); + + +-- +-- Name: unq_role_name; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_role_name ON public.kafka_roles USING btree (lower(name)); + + +-- +-- Name: unq_supervisory_node_case_insesetive_code; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_supervisory_node_case_insesetive_code ON public.kafka_supervisory_nodes USING btree (lower(code)); + + +-- +-- Name: unq_supply_partner_association_programid_supervisorynodeid; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_supply_partner_association_programid_supervisorynodeid ON public.kafka_supply_partner_associations USING btree (programid, supervisorynodeid, supplypartnerid); + + +-- +-- Name: unq_supply_partner_code; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_supply_partner_code ON public.kafka_supply_partners USING btree (lower(code)); + + +-- +-- Name: unq_username; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX unq_username ON public.kafka_users USING btree (lower(username)); + + +-- +-- Name: available_products; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_available_products ( + requisitionid uuid NOT NULL, + orderableid uuid, + orderableversionnumber bigint, + facilitytypeapprovedproductid uuid, + facilitytypeapprovedproductversionnumber bigint +); + + +ALTER TABLE public.kafka_available_products OWNER TO postgres; + +-- +-- Name: available_requisition_column_options; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_available_requisition_column_options ( + id uuid NOT NULL, + optionlabel character varying(255) NOT NULL, + 
optionname character varying(255) NOT NULL, + columnid uuid NOT NULL +); + + +ALTER TABLE public.kafka_available_requisition_column_options OWNER TO postgres; + +-- +-- Name: available_requisition_column_sources; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_available_requisition_column_sources ( + columnid uuid NOT NULL, + value character varying(255) +); + + +ALTER TABLE public.kafka_available_requisition_column_sources OWNER TO postgres; + +-- +-- Name: available_requisition_columns; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_available_requisition_columns ( + id uuid NOT NULL, + canbechangedbyuser boolean, + canchangeorder boolean, + columntype character varying(255) NOT NULL, + definition text, + indicator character varying(255), + isdisplayrequired boolean, + label character varying(255), + mandatory boolean, + name character varying(255), + supportstag boolean DEFAULT false +); + + +ALTER TABLE public.kafka_available_requisition_columns OWNER TO postgres; + +-- +-- Name: columns_maps; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_columns_maps ( + requisitiontemplateid uuid NOT NULL, + requisitioncolumnid uuid NOT NULL, + definition text, + displayorder integer NOT NULL, + indicator character varying(255), + isdisplayed boolean, + label character varying(255), + name character varying(255), + requisitioncolumnoptionid uuid, + source integer NOT NULL, + key character varying(255) NOT NULL, + tag character varying(255) +); + + +ALTER TABLE public.kafka_columns_maps OWNER TO postgres; + +-- +-- Name: configuration_settings; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_configuration_settings ( + key character varying(255) NOT NULL, + value text NOT NULL +); + + +ALTER TABLE public.kafka_configuration_settings OWNER TO postgres; + +-- +-- Name: jasper_template_parameter_dependencies; Type: TABLE; Schema: public; Owner: postgres +-- + 
+CREATE TABLE public.kafka_jasper_template_parameter_dependencies ( + id uuid NOT NULL, + parameterid uuid NOT NULL, + dependency text NOT NULL, + placeholder text NOT NULL +); + + +ALTER TABLE public.kafka_jasper_template_parameter_dependencies OWNER TO postgres; + +-- +-- Name: jasper_templates; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_jasper_templates ( + id uuid NOT NULL, + data bytea, + description text, + name text NOT NULL, + type text +); + + +ALTER TABLE public.kafka_jasper_templates OWNER TO postgres; + +-- +-- Name: jaspertemplateparameter_options; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_jaspertemplateparameter_options ( + jaspertemplateparameterid uuid NOT NULL, + options character varying(255) +); + + +ALTER TABLE public.kafka_jaspertemplateparameter_options OWNER TO postgres; + +-- +-- Name: previous_adjusted_consumptions; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_previous_adjusted_consumptions ( + requisitionlineitemid uuid NOT NULL, + previousadjustedconsumption integer +); + + +ALTER TABLE public.kafka_previous_adjusted_consumptions OWNER TO postgres; + +-- +-- Name: requisition_line_items; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_line_items ( + id uuid NOT NULL, + adjustedconsumption integer, + approvedquantity integer, + averageconsumption integer, + beginningbalance integer, + calculatedorderquantity integer, + maxperiodsofstock numeric(19,2), + maximumstockquantity integer, + nonfullsupply boolean, + numberofnewpatientsadded integer, + orderableid uuid, + packstoship bigint, + priceperpack numeric(19,2), + remarks character varying(250), + requestedquantity integer, + requestedquantityexplanation character varying(255), + skipped boolean, + stockonhand integer, + total integer, + totalconsumedquantity integer, + totalcost numeric(19,2), + totallossesandadjustments integer, + 
totalreceivedquantity integer, + totalstockoutdays integer, + requisitionid uuid, + idealstockamount integer, + calculatedorderquantityisa integer, + additionalquantityrequired integer, + orderableversionnumber bigint, + facilitytypeapprovedproductid uuid, + facilitytypeapprovedproductversionnumber bigint +); + + +ALTER TABLE public.kafka_requisition_line_items OWNER TO postgres; + +-- +-- Name: requisition_permission_strings; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_permission_strings ( + id uuid NOT NULL, + requisitionid uuid NOT NULL, + permissionstring text NOT NULL +); + + +ALTER TABLE public.kafka_requisition_permission_strings OWNER TO postgres; + +-- +-- Name: requisition_template_assignments; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_template_assignments ( + id uuid NOT NULL, + programid uuid NOT NULL, + facilitytypeid uuid, + templateid uuid NOT NULL +); + + +ALTER TABLE public.kafka_requisition_template_assignments OWNER TO postgres; + +-- +-- Name: requisition_templates; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisition_templates ( + id uuid NOT NULL, + createddate timestamp with time zone, + modifieddate timestamp with time zone, + numberofperiodstoaverage integer, + populatestockonhandfromstockcards boolean DEFAULT false NOT NULL, + archived boolean DEFAULT false NOT NULL, + name character varying(255) NOT NULL +); + + +ALTER TABLE public.kafka_requisition_templates OWNER TO postgres; + +-- +-- Name: requisitions; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisitions ( + id uuid NOT NULL, + createddate timestamp with time zone, + modifieddate timestamp with time zone, + draftstatusmessage text, + emergency boolean NOT NULL, + facilityid uuid NOT NULL, + numberofmonthsinperiod integer NOT NULL, + processingperiodid uuid NOT NULL, + programid uuid NOT NULL, + status character 
varying(255) NOT NULL, + supervisorynodeid uuid, + supplyingfacilityid uuid, + templateid uuid NOT NULL, + datephysicalstockcountcompleted date, + version bigint DEFAULT 0, + reportonly boolean, + extradata jsonb +); + + +ALTER TABLE public.kafka_requisitions OWNER TO postgres; + +-- +-- Name: requisitions_previous_requisitions; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_requisitions_previous_requisitions ( + requisitionid uuid NOT NULL, + previousrequisitionid uuid NOT NULL +); + + +ALTER TABLE public.kafka_requisitions_previous_requisitions OWNER TO postgres; + +-- +-- Name: status_changes; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_status_changes ( + id uuid NOT NULL, + createddate timestamp with time zone, + modifieddate timestamp with time zone, + authorid uuid, + status character varying(255) NOT NULL, + requisitionid uuid NOT NULL, + supervisorynodeid uuid +); + + +ALTER TABLE public.kafka_status_changes OWNER TO postgres; + +-- +-- Name: status_messages; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_status_messages ( + id uuid NOT NULL, + createddate timestamp with time zone, + modifieddate timestamp with time zone, + authorfirstname character varying(255), + authorid uuid, + authorlastname character varying(255), + body text NOT NULL, + status character varying(255) NOT NULL, + requisitionid uuid NOT NULL, + statuschangeid uuid NOT NULL +); + + +ALTER TABLE public.kafka_status_messages OWNER TO postgres; + +-- +-- Name: stock_adjustment_reasons; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_stock_adjustment_reasons ( + id uuid NOT NULL, + reasonid uuid NOT NULL, + description text, + isfreetextallowed boolean NOT NULL, + name text NOT NULL, + reasoncategory text NOT NULL, + reasontype text NOT NULL, + requisitionid uuid, + hidden boolean +); + + +ALTER TABLE public.kafka_stock_adjustment_reasons OWNER TO postgres; + +-- +-- 
Name: stock_adjustments; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_stock_adjustments ( + id uuid NOT NULL, + quantity integer NOT NULL, + reasonid uuid NOT NULL, + requisitionlineitemid uuid +); + + +ALTER TABLE public.kafka_stock_adjustments OWNER TO postgres; + +-- +-- Name: template_parameters; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.kafka_template_parameters ( + id uuid NOT NULL, + datatype text, + defaultvalue text, + description text, + displayname text, + name text, + selectexpression text, + templateid uuid NOT NULL, + selectproperty text, + displayproperty text, + required boolean, + selectmethod text, + selectbody text +); + + +ALTER TABLE public.kafka_template_parameters OWNER TO postgres; + +-- +-- Name: available_requisition_column_options available_requisition_column_options_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_available_requisition_column_options + ADD CONSTRAINT available_requisition_column_options_pkey PRIMARY KEY (id); + + +-- +-- Name: available_requisition_columns available_requisition_columns_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_available_requisition_columns + ADD CONSTRAINT available_requisition_columns_pkey PRIMARY KEY (id); + + +-- +-- Name: columns_maps columns_maps_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_columns_maps + ADD CONSTRAINT columns_maps_pkey PRIMARY KEY (requisitiontemplateid, key); + + +-- +-- Name: configuration_settings configuration_settings_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_configuration_settings + ADD CONSTRAINT configuration_settings_pkey PRIMARY KEY (key); + + +-- +-- Name: jasper_templates jasper_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_jasper_templates + ADD CONSTRAINT 
jasper_templates_pkey PRIMARY KEY (id); + + +-- +-- Name: requisition_line_items requisition_line_items_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_line_items + ADD CONSTRAINT requisition_line_items_pkey PRIMARY KEY (id); + + +-- +-- Name: requisition_permission_strings requisition_permission_strings_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_permission_strings + ADD CONSTRAINT requisition_permission_strings_pkey PRIMARY KEY (id); + + +-- +-- Name: requisition_template_assignments requisition_template_assignments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_template_assignments + ADD CONSTRAINT requisition_template_assignments_pkey PRIMARY KEY (id); + + +-- +-- Name: requisition_templates requisition_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisition_templates + ADD CONSTRAINT requisition_templates_pkey PRIMARY KEY (id); + + +-- +-- Name: requisitions requisitions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_requisitions + ADD CONSTRAINT requisitions_pkey PRIMARY KEY (id); + + +-- +-- Name: status_messages status_change_id_unique; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_status_messages + ADD CONSTRAINT status_change_id_unique UNIQUE (statuschangeid) DEFERRABLE INITIALLY DEFERRED; + + +-- +-- Name: status_changes status_changes_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_status_changes + ADD CONSTRAINT status_changes_pkey PRIMARY KEY (id); + + +-- +-- Name: status_messages status_messages_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_status_messages + ADD CONSTRAINT status_messages_pkey PRIMARY KEY (id); + + +-- +-- Name: 
stock_adjustment_reasons stock_adjustment_reasons_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_stock_adjustment_reasons + ADD CONSTRAINT stock_adjustment_reasons_pkey PRIMARY KEY (id); + + +-- +-- Name: stock_adjustments stock_adjustments_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_stock_adjustments + ADD CONSTRAINT stock_adjustments_pkey PRIMARY KEY (id); + + +-- +-- Name: template_parameters template_parameters_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_template_parameters + ADD CONSTRAINT template_parameters_pkey PRIMARY KEY (id); + + +-- +-- Name: jasper_templates uk_5878s5vb2v4y53vun95nrdvgw; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.kafka_jasper_templates + ADD CONSTRAINT uk_5878s5vb2v4y53vun95nrdvgw UNIQUE (name); + + +-- +-- Name: available_non_full_supply_products_requisitionid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX available_non_full_supply_products_requisitionid_idx ON public.kafka_available_products USING btree (requisitionid); + + +-- +-- Name: previous_adjusted_consumptions_requisitionlineitemid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX previous_adjusted_consumptions_requisitionlineitemid_idx ON public.kafka_previous_adjusted_consumptions USING btree (requisitionlineitemid); + + +-- +-- Name: req_line_reason; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX req_line_reason ON public.kafka_stock_adjustments USING btree (reasonid, requisitionlineitemid); + + +-- +-- Name: req_prod_fac_per; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX req_prod_fac_per ON public.kafka_requisitions USING btree (programid, facilityid, processingperiodid) WHERE ((emergency = false) AND (supervisorynodeid IS NULL)); + + +-- +-- Name: req_prod_fac_per_node; Type: INDEX; Schema: public; 
Owner: postgres +-- + +CREATE UNIQUE INDEX req_prod_fac_per_node ON public.kafka_requisitions USING btree (programid, facilityid, processingperiodid, supervisorynodeid) WHERE ((emergency = false) AND (supervisorynodeid IS NOT NULL)); + + +-- +-- Name: req_tmpl_asgmt_prog_fac_type_tmpl_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX req_tmpl_asgmt_prog_fac_type_tmpl_unique_idx ON public.kafka_requisition_template_assignments USING btree (facilitytypeid, programid, templateid) WHERE (facilitytypeid IS NOT NULL); + + +-- +-- Name: req_tmpl_asgmt_prog_fac_type_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX req_tmpl_asgmt_prog_fac_type_unique_idx ON public.kafka_requisition_template_assignments USING btree (facilitytypeid, programid) WHERE (facilitytypeid IS NOT NULL); + + +-- +-- Name: req_tmpl_asgmt_prog_tmpl_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX req_tmpl_asgmt_prog_tmpl_unique_idx ON public.kafka_requisition_template_assignments USING btree (programid, templateid) WHERE (facilitytypeid IS NULL); + + +-- +-- Name: requisition_line_items_requisitionid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX requisition_line_items_requisitionid_idx ON public.kafka_requisition_line_items USING btree (requisitionid); + +ALTER TABLE public.kafka_requisition_line_items CLUSTER ON requisition_line_items_requisitionid_idx; + + +-- +-- Name: requisition_permission_strings_requisitionid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX requisition_permission_strings_requisitionid_idx ON public.kafka_requisition_permission_strings USING btree (requisitionid); + + +-- +-- Name: requisition_template_name_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX requisition_template_name_unique_idx ON public.kafka_requisition_templates USING btree (lower((name)::text), archived) WHERE (archived IS FALSE); + 
+ +-- +-- Name: requisitions_previous_requisitions_requisitionid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX requisitions_previous_requisitions_requisitionid_idx ON public.kafka_requisitions_previous_requisitions USING btree (requisitionid); + + +-- +-- Name: status_changes_requisitionid_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX status_changes_requisitionid_idx ON public.kafka_status_changes USING btree (requisitionid); + + +-- +-- Name: reporting_dates; Type: TABLE; Schema: referencedata; Owner: postgres +-- + +CREATE TABLE reporting_dates ( + due_days int, + late_days int, + country varchar +); + +ALTER TABLE reporting_dates OWNER TO postgres; + +-- Insert default values for reporting dates -- +INSERT INTO reporting_dates(due_days, late_days, country) + VALUES(14, 7, 'Malawi'), (14, 7, 'Mozambique'); + + +CREATE MATERIALIZED VIEW view_facility_access AS +SELECT DISTINCT u.username, facilityid, programid +FROM kafka_right_assignments ra + LEFT JOIN kafka_users u ON u.id = ra.userid +WHERE facilityid IS NOT NULL AND programid IS NOT NULL +UNION +SELECT DISTINCT 'admin', facilityid, programid +FROM kafka_right_assignments ra + LEFT JOIN kafka_users u ON u.id = ra.userid +WHERE facilityid IS NOT NULL AND programid IS NOT NULL AND u.username = 'administrator' +; + + +--- +--- Name: reporting_rate_and_timeliness; Type: TABLE; Schema: public; Owner: postgres +--- +CREATE MATERIALIZED VIEW reporting_rate_and_timeliness AS +SELECT f.name + , dgz.name AS district + , rgz.name AS region + , cgz.name AS country + , ft.name AS facility_type_name + , fo.name AS operator_name + , f.active AS facility_active_status + , final_authorized_requisitions.requisition_id AS req_id + , final_authorized_requisitions.facility_id + , final_authorized_requisitions.program_id + , final_authorized_requisitions.program_name + , final_authorized_requisitions.program_active_status + , final_authorized_requisitions.processing_period_id + , 
final_authorized_requisitions.processing_period_name + , final_authorized_requisitions.processing_schedule_name + , final_authorized_requisitions.processing_period_startdate + , final_authorized_requisitions.processing_period_enddate + , final_authorized_requisitions.emergency_status + , final_authorized_requisitions.created_date + , final_authorized_requisitions.modified_date + , sp.programid AS supported_program + , sp.active AS supported_program_active + , sp.startdate AS supported_program_startdate + , final_authorized_requisitions.status_change_date + , fa.facilityid AS facility + , fa.programid AS program + , fa.username + , CASE + WHEN final_authorized_requisitions.status_change_date::DATE <= (final_authorized_requisitions.processing_period_enddate::DATE + rd.due_days::INT) + AND final_authorized_requisitions.status = 'AUTHORIZED' THEN 'On time' + WHEN final_authorized_requisitions.status_change_date::DATE > (final_authorized_requisitions.processing_period_enddate::DATE + rd.due_days::INT + rd.late_days::INT) + AND final_authorized_requisitions.status = 'AUTHORIZED' THEN 'Unscheduled' + WHEN final_authorized_requisitions.status_change_date::DATE < (final_authorized_requisitions.processing_period_enddate::DATE + rd.due_days::INT + rd.late_days::INT) + AND final_authorized_requisitions.status_change_date::DATE >= (final_authorized_requisitions.processing_period_enddate::DATE + rd.due_days::INT) + AND final_authorized_requisitions.status = 'AUTHORIZED' THEN 'Late' + ELSE 'Did not report' + END AS reporting_timeliness +FROM kafka_facilities f + LEFT JOIN (SELECT ranked_authorized_requisitions.requisition_id + , ranked_authorized_requisitions.facility_id + , ranked_authorized_requisitions.program_id + , ranked_authorized_requisitions.program_name + , ranked_authorized_requisitions.program_active_status + , ranked_authorized_requisitions.processing_period_id + , ranked_authorized_requisitions.processing_period_name + , 
ranked_authorized_requisitions.processing_schedule_name + , ranked_authorized_requisitions.processing_period_startdate + , ranked_authorized_requisitions.processing_period_enddate + , ranked_authorized_requisitions.emergency_status + , ranked_authorized_requisitions.created_date + , ranked_authorized_requisitions.modified_date + , ranked_authorized_requisitions.status + , ranked_authorized_requisitions.status_change_date + , ranked_authorized_requisitions.rank + FROM (SELECT authorized_requisitions.requisition_id + , authorized_requisitions.facility_id + , authorized_requisitions.program_id + , authorized_requisitions.program_name + , authorized_requisitions.program_active_status + , authorized_requisitions.processing_period_id + , authorized_requisitions.processing_period_name + , authorized_requisitions.processing_schedule_name + , authorized_requisitions.processing_period_startdate + , authorized_requisitions.processing_period_enddate + , authorized_requisitions.emergency_status + , authorized_requisitions.created_date + , authorized_requisitions.modified_date + , authorized_requisitions.status + , authorized_requisitions.status_change_date + , rank() OVER (PARTITION BY authorized_requisitions.program_id, authorized_requisitions.facility_id, authorized_requisitions.processing_period_id ORDER BY authorized_requisitions.status_change_date DESC) AS rank + FROM (SELECT r.id AS requisition_id + , r.facilityid AS facility_id + , r.programid AS program_id + , p.name AS program_name + , p.active AS program_active_status + , r.processingperiodid AS processing_period_id + , pp.name AS processing_period_name + , ps.name AS processing_schedule_name + , pp.startdate AS processing_period_startdate + , pp.enddate AS processing_period_enddate + , r.emergency AS emergency_status + , r.createddate AS created_date + , r.modifieddate AS modified_date + , authorized_status_changes.status + , authorized_status_changes.createddate AS status_change_date + FROM kafka_requisitions r + 
LEFT JOIN (SELECT sc.requisitionid, sc.status, sc.createddate + FROM kafka_status_changes sc + WHERE sc.status = 'AUTHORIZED') authorized_status_changes ON authorized_status_changes.requisitionid = r.id + LEFT JOIN kafka_programs p ON p.id = r.programid + LEFT JOIN kafka_processing_periods pp ON pp.id = r.processingperiodid + LEFT JOIN kafka_processing_schedules ps ON ps.id = pp.processingscheduleid + ) authorized_requisitions + ORDER BY authorized_requisitions.facility_id, authorized_requisitions.processing_period_id, authorized_requisitions.status_change_date DESC) ranked_authorized_requisitions + WHERE ranked_authorized_requisitions.rank = 1) final_authorized_requisitions ON f.id = final_authorized_requisitions.facility_id + LEFT JOIN kafka_geographic_zones dgz ON dgz.id = f.geographiczoneid + LEFT JOIN kafka_geographic_zones rgz ON rgz.id = dgz.parentid + LEFT JOIN kafka_geographic_zones cgz ON cgz.id = rgz.parentid + LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid + LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid + LEFT JOIN reporting_dates rd ON rd.country = cgz.name + LEFT JOIN kafka_supported_programs sp ON sp.facilityid = f.id AND sp.programid = final_authorized_requisitions.program_id + LEFT JOIN view_facility_access fa ON fa.facilityid = f.id AND fa.programid = final_authorized_requisitions.program_id +ORDER BY final_authorized_requisitions.processing_period_enddate DESC +WITH DATA; + + +ALTER MATERIALIZED VIEW reporting_rate_and_timeliness OWNER TO postgres; + +--- +--- Name: adjustments; Type: TABLE; Schema: public; Owner: postgres +--- +CREATE MATERIALIZED VIEW adjustments AS +SELECT rli.id AS requisition_line_item_id + , r.id AS requisition_id + , r.createddate::DATE AS created_date + , r.modifieddate::DATE AS modified_date + , r.emergency AS emergency_status + , sn.name AS supervisory_node + , f.name AS facility_name + , ft.name AS facility_type_name + , fo.name AS facility_operator_name + , f.active AS facilty_active_status + 
, dgz.name AS district_name + , rgz.name AS region_name + , cgz.name AS country_name + , p.name AS program_name + , p.active AS program_active_status + , pp.name AS processing_period_name + , latest_orderables.id AS orderable_id + , latest_orderables.code AS product_code + , latest_orderables.fullproductname AS full_product_name + , oi.value AS trade_item_id + , rli.totallossesandadjustments AS total_losses_and_adjustments + , final_status_changes.status AS status + , final_status_changes.authorid AS author_id + , final_status_changes.createddate::DATE AS status_history_created_date + , sa.id AS adjustment_lines_id + , sa.quantity AS quantity + , sar.name AS stock_adjustment_reason + , fa.facilityid AS facility + , fa.programid AS program + , fa.username AS username +FROM kafka_requisitions r + LEFT JOIN kafka_requisition_line_items rli ON rli.requisitionid = r.id + LEFT JOIN kafka_supervisory_nodes sn ON sn.id = r.supervisorynodeid + LEFT JOIN kafka_facilities f ON f.id = r.facilityid + LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid + LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid + LEFT JOIN kafka_geographic_zones dgz ON dgz.id = f.geographiczoneid + LEFT JOIN kafka_geographic_zones rgz ON rgz.id = dgz.parentid + LEFT JOIN kafka_geographic_zones cgz ON cgz.id = rgz.parentid + LEFT JOIN kafka_programs p ON p.id = r.programid + LEFT JOIN kafka_processing_periods pp ON pp.id = r.processingperiodid + LEFT JOIN (SELECT DISTINCT ON (id) id, code, fullproductname, versionnumber FROM kafka_orderables ORDER BY id, versionnumber DESC) latest_orderables ON latest_orderables.id = rli.orderableid AND latest_orderables.versionnumber = rli.orderableversionnumber + LEFT JOIN kafka_orderable_identifiers oi ON oi.orderableid = latest_orderables.id AND oi.orderableversionnumber = latest_orderables.versionnumber AND oi.key = 'tradeItem' + LEFT JOIN (SELECT DISTINCT ON (requisitionid) id, requisitionid, status, authorid, createddate FROM kafka_status_changes 
ORDER BY requisitionid, createddate DESC) final_status_changes ON final_status_changes.requisitionid = r.id + LEFT JOIN kafka_stock_adjustments sa ON sa.requisitionlineitemid = rli.id + LEFT JOIN kafka_stock_adjustment_reasons sar ON sar.id = sa.reasonid AND sar.requisitionid = r.id + LEFT JOIN view_facility_access fa ON fa.facilityid = r.facilityid AND fa.programid = r.programid +WHERE final_status_changes.status NOT IN ('SKIPPED', 'INITIATED', 'SUBMITTED') +ORDER BY rli.id, fa.username, r.modifieddate DESC NULLS LAST +WITH DATA; + +ALTER MATERIALIZED VIEW adjustments OWNER TO postgres; + + +--- +--- Name: stock_status_and_consumption; Type: TABLE; Schema: public; Owner: postgres +--- +CREATE MATERIALIZED VIEW stock_status_and_consumption AS +SELECT li.requisition_line_item_id + , r.id + , r.createddate AS req_created_date + , r.modifieddate AS modified_date + , r.emergency AS emergency_status + , r.supplyingfacilityid AS supplying_facility + , r.supervisorynodeid AS supervisory_node + , r.facilityid AS facility_id + , f.code AS facility_code + , f.name AS facility_name + , f.active AS facilty_active_status + , dgz.id AS district_id + , dgz.code AS district_code + , dgz.name AS district_name + , rgz.id AS region_id + , rgz.code AS region_code + , rgz.name AS region_name + , cgz.id AS country_id + , cgz.code AS country_code + , cgz.name AS country_name + , ft.id AS facility_type_id + , ft.code AS facility_type_code + , ft.name AS facility_type_name + , fo.id AS facility_operator_id + , fo.code AS facility_operator_code + , fo.name AS facility_operator_name + , p.id AS program_id + , p.code AS program_code + , p.name AS program_name + , p.active AS program_active_status + , pp.id AS processing_period_id + , pp.name AS processing_period_name + , pp.startdate AS processing_period_startdate + , pp.enddate AS processing_period_enddate + , ps.id AS processing_schedule_id + , ps.code AS processing_schedule_code + , ps.name AS processing_schedule_name + , li.requisition_id 
AS li_req_id + , li.orderable_id + , li.product_code + , li.full_product_name + , li.trade_item_id + , li.beginning_balance + , li.total_consumed_quantity + , li.average_consumption + , li.total_losses_and_adjustments + , li.stock_on_hand + , li.total_stockout_days + , li.max_periods_of_stock + , li.calculated_order_quantity + , li.requested_quantity + , li.approved_quantity + , li.packs_to_ship + , li.price_per_pack + , li.total_cost + , li.total_received_quantity + , sc.requisitionid AS status_req_id + , sc.status AS req_status + , sc.authorid AS author_id + , sc.createddate AS status_date + , fa.facilityid AS facility + , fa.programid AS program + , fa.username + , li.closing_balance + , li.amc + , li.consumption + , li.adjusted_consumption + , li.order_quantity + , f.enabled as facility_status + , rd.due_days + , rd.late_days + , li.combined_stockout + , li.stock_status +FROM kafka_requisitions r + LEFT JOIN kafka_status_changes sc ON sc.requisitionid = r.id + LEFT JOIN kafka_facilities f ON f.id = r.facilityid + LEFT JOIN kafka_geographic_zones dgz ON dgz.id = f.geographiczoneid + LEFT JOIN kafka_geographic_zones rgz ON rgz.id = dgz.parentid + LEFT JOIN kafka_geographic_zones cgz ON cgz.id = rgz.parentid + LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid + LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid + LEFT JOIN kafka_programs p ON p.id = r.programid + LEFT JOIN kafka_processing_periods pp ON pp.id = r.processingperiodid + LEFT JOIN kafka_processing_schedules ps ON ps.id = pp.processingscheduleid + LEFT JOIN reporting_dates rd ON rd.country = cgz.name + LEFT JOIN view_facility_access fa ON fa.facilityid = f.id AND fa.programid = r.programid + LEFT JOIN (SELECT DISTINCT ON (rli.id) rli.id AS requisition_line_item_id + , requisitionid AS requisition_id + , rli.orderableid AS orderable_id + , latest_orderables.code AS product_code + , latest_orderables.fullproductname AS full_product_name + , oi.value AS trade_item_id + , beginningbalance 
AS beginning_balance + , totalconsumedquantity AS total_consumed_quantity + , averageconsumption AS average_consumption + , totallossesandadjustments AS total_losses_and_adjustments + , stockonhand AS stock_on_hand + , totalstockoutdays AS total_stockout_days + , maxperiodsofstock AS max_periods_of_stock + , calculatedorderquantity AS calculated_order_quantity + , requestedquantity AS requested_quantity + , approvedquantity AS approved_quantity + , packstoship AS packs_to_ship + , priceperpack AS price_per_pack + , totalcost AS total_cost + , totalreceivedquantity AS total_received_quantity + , SUM(stockonhand) AS closing_balance + , SUM(averageconsumption) AS amc + , SUM(totalconsumedquantity) AS consumption + , SUM(adjustedconsumption) AS adjusted_consumption + , SUM(approvedquantity) AS order_quantity + , CASE + WHEN (SUM(stockonhand) = 0 OR SUM(totalstockoutdays) > 0 OR SUM(beginningbalance) = 0 OR SUM(maxperiodsofstock) = 0) THEN 1 + ELSE 0 + END as combined_stockout + , CASE + WHEN SUM(maxperiodsofstock) > 6 THEN 'Overstocked' + WHEN SUM(maxperiodsofstock) < 3 AND (SUM(stockonhand) = 0 OR SUM(totalstockoutdays) > 0 OR SUM(beginningbalance) = 0 OR SUM(maxperiodsofstock) = 0) THEN 'Stocked Out' + WHEN SUM(maxperiodsofstock) < 3 AND SUM(maxperiodsofstock) > 0 AND NOT(SUM(stockonhand) = 0 OR SUM(totalstockoutdays) > 0 OR SUM(beginningbalance) = 0 OR SUM(maxperiodsofstock) = 0) THEN 'Understocked' + WHEN SUM(maxperiodsofstock) = 0 AND NOT(SUM(stockonhand) = 0 OR SUM(totalstockoutdays) > 0 OR SUM(beginningbalance) = 0 OR SUM(maxperiodsofstock) = 0) THEN 'Unknown' + ELSE 'Adequately stocked' + END as stock_status + FROM kafka_requisition_line_items rli + LEFT JOIN (SELECT DISTINCT ON (id) id, code, fullproductname, versionnumber FROM kafka_orderables ORDER BY id, versionnumber DESC) latest_orderables ON latest_orderables.id = rli.orderableid AND latest_orderables.versionnumber = rli.orderableversionnumber + LEFT JOIN kafka_orderable_identifiers oi ON oi.orderableid 
= latest_orderables.id AND oi.orderableversionnumber = latest_orderables.versionnumber AND oi.key = 'tradeItem' + GROUP BY rli.id + , requisitionid + , rli.orderableid + , latest_orderables.code + , latest_orderables.fullproductname + , oi.value + , beginningbalance + , totalconsumedquantity + , averageconsumption + , totallossesandadjustments + , stockonhand + , totalstockoutdays + , maxperiodsofstock + , calculatedorderquantity + , requestedquantity + , approvedquantity + , packstoship + , priceperpack + , totalcost + , totalreceivedquantity) li ON li.requisition_id = r.id +WITH DATA; + +ALTER MATERIALIZED VIEW stock_status_and_consumption OWNER TO postgres; + +CREATE MATERIALIZED VIEW facilities AS +SELECT f.code as code, f.name as name, gz.name as district, ft.name as type, fo.name as operator_name +FROM public.kafka_facilities f +left join public.kafka_geographic_zones gz on gz.id = f.geographiczoneid +left join public.kafka_facility_types ft on ft.id = f.typeid +left join public.kafka_facility_operators fo on fo.id = f.operatedbyid +WITH DATA; + +ALTER MATERIALIZED VIEW facilities OWNER TO postgres; + + +CREATE MATERIALIZED VIEW expired_products AS +SELECT "Facility Name" AS "Facility Name", + "Facility Type Code" AS "Facility Type Code", + "Program" AS "Program", + "Product Code" AS "Product Code", + "Full Product Name" AS "Full Product Name", + "Batch Number" AS "Batch Number", + "Expiration Date" AS "Expiration Date", + "Unit Price" AS "Unit Price", + "Total Cost" AS "Total Cost" +FROM + (SELECT f.name "Facility Name", + f.code "Facility Code", + f.description "Facility Description", + ft.name "Facility Type Name", + ft.code "Facility Type Code", + ft.description "Facility Type Description", + fo.name "Facility Operator", + fo.description "Facility Operator Description", + pp.name "Reporting Period", + pp.startdate "Processing Period Start Date", + pp.enddate "Processing Period End Date", + ps.name "Processing Schedule Name", + pp.description "Processing 
Period Description", + p.name "Program", + o.fullproductname "Full Product Name", + o.code "Product Code", + o.description "Product Description", + o.packroundingthreshold "Pack Rounding Threshold", + o.netcontent "Net Content", + o.lastupdated "Last Updated", + l.lotcode "Batch Number", + l.expirationdate "Expiration Date", + po.priceperpack "Unit Price", + rli.adjustedconsumption "Adjusted Consumption", + rli.approvedquantity "Approved Quantity", + rli.averageconsumption "Average Consumtion", + rli.beginningbalance "beginning Balance", + rli.calculatedorderquantity "Calculated Order Quantity", + rli.maxperiodsofstock "Maximum Periods of Stock", + rli.maximumstockquantity "Minimum Stock Quantity", + r.createddate "Requisition Creation Date", + r.modifieddate "Requisition Modification Date", + r.status "Requisition Status", + r.version "Requisition Version", + r.datephysicalstockcountcompleted "Date Physical Stockcount Completed", + rli.packstoship "Packs To Ship", + rli.priceperpack "Price Per Pack", + rli.remarks "Remarks", + rli.requestedquantity "Requested Quantity", + rli.requestedquantity "Requested Quantity Explanation", + rli.stockonhand "Stock On Hand", + rli.total "Total", + rli.totalconsumedquantity "Total Consumed Quantity", + rli.totalcost "Total Cost", + rli.totallossesandadjustments "Total Losses and Adjustments", + rli.totalreceivedquantity "Total Received Quantity", + rli.totalstockoutdays "Total Stockout Days", + -- rli.numberofpatientsontreatmentnextmonth "Number of Patients on Treatment Next Month", + -- rli.totalrequirement "Total Requirement", + rli.idealstockamount "Ideal Stock Amount", + -- rli.totalquantityneededbyhf "Total Quantity Needed by HF", + -- rli.quantitytoissue "Quantity to Issue", + -- rli.convertedquantitytoissue "Converted Quantity to Issue", + fa.username + FROM kafka_requisition_line_items rli + LEFT JOIN kafka_requisitions r ON rli.requisitionid = r.id + LEFT JOIN kafka_facilities f ON r.facilityid =f.id + LEFT JOIN 
kafka_facility_types ft ON f.typeid = ft.id + LEFT JOIN kafka_programs p ON p.id = r.programid + LEFT JOIN kafka_orderables o ON o.id = rli.orderableid + LEFT JOIN kafka_processing_periods pp ON pp.id = r.processingperiodid + LEFT JOIN kafka_processing_schedules ps ON ps.id = pp.processingscheduleid + LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid + LEFT JOIN kafka_orderable_identifiers oi ON oi.orderableid =o.id + LEFT JOIN kafka_lots l ON l.tradeitemid = oi.value::uuid + --LEFT JOIN kafka_stock_event_line_items seli ON seli.orderableid =o.id + LEFT JOIN kafka_program_orderables po ON po.orderableid =o.id + LEFT JOIN view_facility_access fa ON fa.facilityid = f.id) AS expired +WITH DATA; + +ALTER MATERIALIZED VIEW expired_products OWNER TO postgres; + +CREATE MATERIALIZED VIEW stock_card_summaries AS +SELECT + f.code AS "Facility Code", + f.name AS "Facility", + ft.name AS "Facility Type", + fo.name AS "Facility Operated By", + p.name AS "Program", + o.code AS "Product Code", + o.fullproductname AS "Product", + o.netcontent AS "Pack Size", + lots.lotcode AS "Batch Number", + lots.expirationdate AS "Expiry Date", + stock_summary.remaining_stock_on_hand, + CASE + WHEN lots.expirationdate IS NOT NULL AND lots.expirationdate < CURRENT_DATE THEN TRUE + ELSE FALSE + END AS is_expired, + fa.username +FROM kafka_stock_cards stc +INNER JOIN ( + SELECT + stcli.stockcardid, + SUM( + CASE + WHEN COALESCE(stclire.reasontype, 'CREDIT') = 'CREDIT' THEN stcli.quantity + WHEN stclire.reasontype = 'DEBIT' THEN -stcli.quantity + ELSE 0 + END + ) AS remaining_stock_on_hand + FROM kafka_stock_card_line_items stcli + LEFT JOIN kafka_stock_card_line_item_reasons stclire + ON stclire.id = stcli.reasonid + GROUP BY stcli.stockcardid +) AS stock_summary + ON stc.id = stock_summary.stockcardid +LEFT JOIN kafka_lots lots ON lots.id = stc.lotid::uuid +LEFT JOIN kafka_facilities f ON f.id = stc.facilityid::uuid +LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid::uuid +LEFT 
JOIN kafka_programs p ON p.id = stc.programid::uuid +LEFT JOIN kafka_orderables o ON o.id = stc.orderableid::uuid +LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid::uuid +LEFT JOIN view_facility_access fa ON fa.facilityid = f.id +WITH DATA; + +ALTER MATERIALIZED VIEW stock_card_summaries OWNER TO postgres; + +DROP MATERIALIZED VIEW IF EXISTS stock_card_summaries_with_prices; + +CREATE MATERIALIZED VIEW stock_card_summaries_with_prices AS +WITH latest_prices AS ( + SELECT DISTINCT ON (orderableid, programid) + orderableid, + programid, + priceperpack::numeric, + orderableversionnumber + FROM kafka_program_orderables + WHERE active IS TRUE + ORDER BY orderableid, programid, orderableversionnumber DESC +) +SELECT DISTINCT + f.code AS "Facility Code", + f.name AS "Facility", + ft.name AS "Facility Type", + fo.name AS "Facility Operated By", + p.name AS "Program", + o.code AS "Product Code", + o.fullproductname AS "Product", + o.netcontent AS "Pack Size", + lots.lotcode AS "Batch Number", + lots.expirationdate AS "Expiry Date", + stock_summary.remaining_stock_on_hand, + CASE + WHEN lots.expirationdate IS NOT NULL AND lots.expirationdate < CURRENT_DATE THEN TRUE + ELSE FALSE + END AS is_expired, + lp.priceperpack AS "Pack Cost (LSL)", + CASE + WHEN o.netcontent > 0 THEN ROUND(stock_summary.remaining_stock_on_hand / o.netcontent * lp.priceperpack, 2) + ELSE NULL + END AS "Total Cost (LSL)", + fa.username +FROM kafka_stock_cards stc +INNER JOIN ( + SELECT + stcli.stockcardid, + SUM( + CASE + WHEN COALESCE(stclire.reasontype, 'CREDIT') = 'CREDIT' THEN stcli.quantity + WHEN stclire.reasontype = 'DEBIT' THEN -stcli.quantity + ELSE 0 + END + ) AS remaining_stock_on_hand + FROM kafka_stock_card_line_items stcli + LEFT JOIN kafka_stock_card_line_item_reasons stclire + ON stclire.id = stcli.reasonid + GROUP BY stcli.stockcardid +) AS stock_summary + ON stc.id = stock_summary.stockcardid +LEFT JOIN kafka_lots lots ON lots.id = stc.lotid::uuid +LEFT JOIN 
kafka_facilities f ON f.id = stc.facilityid::uuid +LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid::uuid +LEFT JOIN kafka_programs p ON p.id = stc.programid::uuid +LEFT JOIN kafka_orderables o ON o.id = stc.orderableid::uuid +LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid::uuid +LEFT JOIN view_facility_access fa ON fa.facilityid = f.id +LEFT JOIN latest_prices lp ON + lp.orderableid = stc.orderableid::uuid AND + lp.programid = stc.programid::uuid +WITH DATA; + +ALTER MATERIALIZED VIEW stock_card_summaries_with_prices OWNER TO postgres; diff --git a/reporting/db/docker-entrypoint-initdb.d/templates/OlmisCreateTableStatements.sql b/reporting/db/docker-entrypoint-initdb.d/templates/OlmisCreateTableStatements.sql index f0053bc5..cdde2c69 100644 --- a/reporting/db/docker-entrypoint-initdb.d/templates/OlmisCreateTableStatements.sql +++ b/reporting/db/docker-entrypoint-initdb.d/templates/OlmisCreateTableStatements.sql @@ -2,6 +2,7 @@ -- Created by Craig Appl (cappl@ona.io) -- Modified by A. Maritim (amaritim@ona.io) and J. Wambere (jwambere@ona.io) -- Further modified by C. 
Ahn (chongsun.ahn@villagereach.org) +-- Further modified by Lesotho eLMIS team in April 2025 -- Last Updated 19 May 2020 -- @@ -648,6 +649,120 @@ CREATE TABLE public.kafka_users ( ALTER TABLE public.kafka_users OWNER TO postgres; +-- Kafka Stock Cards Table +CREATE TABLE public.kafka_stock_cards ( + id uuid NOT NULL, + facilityid uuid NOT NULL, + lotid uuid, + orderableid uuid NOT NULL, + programid uuid NOT NULL, + origineventid uuid NOT NULL, + isshowed boolean DEFAULT true, + isactive boolean DEFAULT true +); + +-- Kafka Stock Card Line Items Table +CREATE TABLE public.kafka_stock_card_line_items ( + id uuid NOT NULL, + stockcardid uuid NOT NULL, + quantity integer NOT NULL, + reasonid uuid, + -- occurreddate date NOT NULL, + -- processeddate timestamp without time zone NOT NULL, + destinationfreetext character varying(255), + documentnumber character varying(255), + reasonfreetext character varying(255), + signature character varying(255), + sourcefreetext character varying(255), + userid uuid NOT NULL, + destinationid uuid, + origineventid uuid NOT NULL, + sourceid uuid, + cartonnumber character varying(255), + invoicenumber character varying(255), + referencenumber character varying(255), + unitprice double precision, + extradata jsonb +); + + +-- Kafka Stock Card Line Item Reasons Table +CREATE TABLE public.kafka_stock_card_line_item_reasons ( + id uuid NOT NULL, + name text NOT NULL, + description text, + isfreetextallowed boolean NOT NULL, + reasoncategory text NOT NULL, + reasontype text NOT NULL +); + +-- Ownership +ALTER TABLE public.kafka_stock_cards OWNER TO postgres; +ALTER TABLE public.kafka_stock_card_line_items OWNER TO postgres; +ALTER TABLE public.kafka_stock_card_line_item_reasons OWNER TO postgres; + +ALTER TABLE ONLY public.kafka_stock_cards + ADD CONSTRAINT kafka_stock_cards_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY public.kafka_stock_card_line_items + ADD CONSTRAINT kafka_stock_card_line_items_pkey PRIMARY KEY (id); + +ALTER TABLE ONLY 
public.kafka_stock_card_line_item_reasons + ADD CONSTRAINT kafka_stock_card_line_item_reasons_pkey PRIMARY KEY (id); + +-- ========================================== +-- Indexes for Kafka Stock Cards Table +-- ========================================== + +-- Index to speed up lookups by facility ID +CREATE INDEX kafka_stock_cards_facilityid_idx +ON public.kafka_stock_cards USING btree (facilityid); + +-- Index to optimize queries by orderable ID +CREATE INDEX kafka_stock_cards_orderableid_idx +ON public.kafka_stock_cards USING btree (orderableid); + +-- Index to improve performance for program-based searches +CREATE INDEX kafka_stock_cards_programid_idx +ON public.kafka_stock_cards USING btree (programid); + +-- Index to quickly retrieve records based on lot ID +CREATE INDEX kafka_stock_cards_lotid_idx +ON public.kafka_stock_cards USING btree (lotid); + +-- Index to enhance performance when querying by origin event ID +CREATE INDEX kafka_stock_cards_origineventid_idx +ON public.kafka_stock_cards USING btree (origineventid); + +-- ========================================== +-- Indexes for Kafka Stock Card Line Items Table +-- ========================================== + +-- Index to speed up lookups by stock card ID +CREATE INDEX kafka_stock_card_line_items_stockcardid_idx +ON public.kafka_stock_card_line_items USING btree (stockcardid); + +-- Index to improve performance when searching by reason ID +CREATE INDEX kafka_stock_card_line_items_reasonid_idx +ON public.kafka_stock_card_line_items USING btree (reasonid); + +-- Index to optimize user-based searches +CREATE INDEX kafka_stock_card_line_items_userid_idx +ON public.kafka_stock_card_line_items USING btree (userid); + +-- Index to enhance performance for origin event ID searches +CREATE INDEX kafka_stock_card_line_items_origineventid_idx +ON public.kafka_stock_card_line_items USING btree (origineventid); + +-- ========================================== +-- Indexes for Kafka Stock Card Line Item Reasons Table +-- 
========================================== + +-- Index to speed up lookups by reason name +CREATE INDEX kafka_stock_card_line_item_reasons_name_idx +ON public.kafka_stock_card_line_item_reasons USING btree (name); + + -- -- Name: commodity_types commodity_types_classificationsystem_classificationid_key; Type: CONSTRAINT; Schema: public; Owner: postgres -- @@ -2305,4 +2420,200 @@ left join public.kafka_facility_types ft on ft.id = f.typeid left join public.kafka_facility_operators fo on fo.id = f.operatedbyid WITH DATA; -ALTER MATERIALIZED VIEW facilities OWNER TO postgres; \ No newline at end of file +ALTER MATERIALIZED VIEW facilities OWNER TO postgres; + + +CREATE MATERIALIZED VIEW expired_products AS +SELECT "Facility Name" AS "Facility Name", + "Facility Type Code" AS "Facility Type Code", + "Program" AS "Program", + "Product Code" AS "Product Code", + "Full Product Name" AS "Full Product Name", + "Batch Number" AS "Batch Number", + "Expiration Date" AS "Expiration Date", + "Unit Price" AS "Unit Price", + "Total Cost" AS "Total Cost" +FROM + (SELECT f.name "Facility Name", + f.code "Facility Code", + f.description "Facility Description", + ft.name "Facility Type Name", + ft.code "Facility Type Code", + ft.description "Facility Type Description", + fo.name "Facility Operator", + fo.description "Facility Operator Description", + pp.name "Reporting Period", + pp.startdate "Processing Period Start Date", + pp.enddate "Processing Period End Date", + ps.name "Processing Schedule Name", + pp.description "Processing Period Description", + p.name "Program", + o.fullproductname "Full Product Name", + o.code "Product Code", + o.description "Product Description", + o.packroundingthreshold "Pack Rounding Threshold", + o.netcontent "Net Content", + o.lastupdated "Last Updated", + l.lotcode "Batch Number", + l.expirationdate "Expiration Date", + po.priceperpack "Unit Price", + rli.adjustedconsumption "Adjusted Consumption", + rli.approvedquantity "Approved Quantity", + 
rli.averageconsumption "Average Consumtion", + rli.beginningbalance "beginning Balance", + rli.calculatedorderquantity "Calculated Order Quantity", + rli.maxperiodsofstock "Maximum Periods of Stock", + rli.maximumstockquantity "Minimum Stock Quantity", + r.createddate "Requisition Creation Date", + r.modifieddate "Requisition Modification Date", + r.status "Requisition Status", + r.version "Requisition Version", + r.datephysicalstockcountcompleted "Date Physical Stockcount Completed", + rli.packstoship "Packs To Ship", + rli.priceperpack "Price Per Pack", + rli.remarks "Remarks", + rli.requestedquantity "Requested Quantity", + rli.requestedquantity "Requested Quantity Explanation", + rli.stockonhand "Stock On Hand", + rli.total "Total", + rli.totalconsumedquantity "Total Consumed Quantity", + rli.totalcost "Total Cost", + rli.totallossesandadjustments "Total Losses and Adjustments", + rli.totalreceivedquantity "Total Received Quantity", + rli.totalstockoutdays "Total Stockout Days", + -- rli.numberofpatientsontreatmentnextmonth "Number of Patients on Treatment Next Month", + -- rli.totalrequirement "Total Requirement", + rli.idealstockamount "Ideal Stock Amount", + -- rli.totalquantityneededbyhf "Total Quantity Needed by HF", + -- rli.quantitytoissue "Quantity to Issue", + -- rli.convertedquantitytoissue "Converted Quantity to Issue", + fa.username + FROM kafka_requisition_line_items rli + LEFT JOIN kafka_requisitions r ON rli.requisitionid = r.id + LEFT JOIN kafka_facilities f ON r.facilityid =f.id + LEFT JOIN kafka_facility_types ft ON f.typeid = ft.id + LEFT JOIN kafka_programs p ON p.id = r.programid + LEFT JOIN kafka_orderables o ON o.id = rli.orderableid + LEFT JOIN kafka_processing_periods pp ON pp.id = r.processingperiodid + LEFT JOIN kafka_processing_schedules ps ON ps.id = pp.processingscheduleid + LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid + LEFT JOIN kafka_orderable_identifiers oi ON oi.orderableid =o.id + LEFT JOIN kafka_lots l ON 
l.tradeitemid = oi.value::uuid + --LEFT JOIN kafka_stock_event_line_items seli ON seli.orderableid =o.id + LEFT JOIN kafka_program_orderables po ON po.orderableid =o.id + LEFT JOIN view_facility_access fa ON fa.facilityid = f.id) AS expired +WITH DATA; + +ALTER MATERIALIZED VIEW expired_products OWNER TO postgres; + +CREATE MATERIALIZED VIEW stock_card_summaries AS +SELECT + f.code AS "Facility Code", + f.name AS "Facility", + ft.name AS "Facility Type", + fo.name AS "Facility Operated By", + p.name AS "Program", + o.code AS "Product Code", + o.fullproductname AS "Product", + o.netcontent AS "Pack Size", + lots.lotcode AS "Batch Number", + lots.expirationdate AS "Expiry Date", + stock_summary.remaining_stock_on_hand, + CASE + WHEN lots.expirationdate IS NOT NULL AND lots.expirationdate < CURRENT_DATE THEN TRUE + ELSE FALSE + END AS is_expired, + fa.username +FROM kafka_stock_cards stc +INNER JOIN ( + SELECT + stcli.stockcardid, + SUM( + CASE + WHEN COALESCE(stclire.reasontype, 'CREDIT') = 'CREDIT' THEN stcli.quantity + WHEN stclire.reasontype = 'DEBIT' THEN -stcli.quantity + ELSE 0 + END + ) AS remaining_stock_on_hand + FROM kafka_stock_card_line_items stcli + LEFT JOIN kafka_stock_card_line_item_reasons stclire + ON stclire.id = stcli.reasonid + GROUP BY stcli.stockcardid +) AS stock_summary + ON stc.id = stock_summary.stockcardid +LEFT JOIN kafka_lots lots ON lots.id = stc.lotid::uuid +LEFT JOIN kafka_facilities f ON f.id = stc.facilityid::uuid +LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid::uuid +LEFT JOIN kafka_programs p ON p.id = stc.programid::uuid +LEFT JOIN kafka_orderables o ON o.id = stc.orderableid::uuid +LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid::uuid +LEFT JOIN view_facility_access fa ON fa.facilityid = f.id +WITH DATA; + +ALTER MATERIALIZED VIEW stock_card_summaries OWNER TO postgres; + +DROP MATERIALIZED VIEW IF EXISTS stock_card_summaries_with_prices; + +CREATE MATERIALIZED VIEW stock_card_summaries_with_prices AS 
+WITH latest_prices AS ( + SELECT DISTINCT ON (orderableid, programid) + orderableid, + programid, + priceperpack::numeric, + orderableversionnumber + FROM kafka_program_orderables + WHERE active IS TRUE + ORDER BY orderableid, programid, orderableversionnumber DESC +) +SELECT DISTINCT + f.code AS "Facility Code", + f.name AS "Facility", + ft.name AS "Facility Type", + fo.name AS "Facility Operated By", + p.name AS "Program", + o.code AS "Product Code", + o.fullproductname AS "Product", + o.netcontent AS "Pack Size", + lots.lotcode AS "Batch Number", + lots.expirationdate AS "Expiry Date", + stock_summary.remaining_stock_on_hand, + CASE + WHEN lots.expirationdate IS NOT NULL AND lots.expirationdate < CURRENT_DATE THEN TRUE + ELSE FALSE + END AS is_expired, + lp.priceperpack AS "Pack Cost (LSL)", + CASE + WHEN o.netcontent > 0 THEN ROUND(stock_summary.remaining_stock_on_hand / o.netcontent * lp.priceperpack, 2) + ELSE NULL + END AS "Total Cost (LSL)", + fa.username +FROM kafka_stock_cards stc +INNER JOIN ( + SELECT + stcli.stockcardid, + SUM( + CASE + WHEN COALESCE(stclire.reasontype, 'CREDIT') = 'CREDIT' THEN stcli.quantity + WHEN stclire.reasontype = 'DEBIT' THEN -stcli.quantity + ELSE 0 + END + ) AS remaining_stock_on_hand + FROM kafka_stock_card_line_items stcli + LEFT JOIN kafka_stock_card_line_item_reasons stclire + ON stclire.id = stcli.reasonid + GROUP BY stcli.stockcardid +) AS stock_summary + ON stc.id = stock_summary.stockcardid +LEFT JOIN kafka_lots lots ON lots.id = stc.lotid::uuid +LEFT JOIN kafka_facilities f ON f.id = stc.facilityid::uuid +LEFT JOIN kafka_facility_types ft ON ft.id = f.typeid::uuid +LEFT JOIN kafka_programs p ON p.id = stc.programid::uuid +LEFT JOIN kafka_orderables o ON o.id = stc.orderableid::uuid +LEFT JOIN kafka_facility_operators fo ON fo.id = f.operatedbyid::uuid +LEFT JOIN view_facility_access fa ON fa.facilityid = f.id +LEFT JOIN latest_prices lp ON + lp.orderableid = stc.orderableid::uuid AND + lp.programid = 
stc.programid::uuid +WITH DATA; + +ALTER MATERIALIZED VIEW stock_card_summaries_with_prices OWNER TO postgres; diff --git a/reporting/docker-compose.local.yml b/reporting/docker-compose.local.yml index d152e921..93c4a4d8 100644 --- a/reporting/docker-compose.local.yml +++ b/reporting/docker-compose.local.yml @@ -112,7 +112,7 @@ services: - 8083:8083 networks: - default - - openlmis-ref-distro_default + - lesotho-ref-distro_default environment: - BOOTSTRAP_SERVERS=kafka:29092 - GROUP_ID=1 @@ -180,5 +180,5 @@ volumes: external: false networks: - openlmis-ref-distro_default: + lesotho-ref-distro_default: external: true diff --git a/reporting/docker-compose.yml b/reporting/docker-compose.yml index d7ca3e2e..0e952ec7 100644 --- a/reporting/docker-compose.yml +++ b/reporting/docker-compose.yml @@ -37,9 +37,11 @@ services: nginx: image: openlmis/nginx:${OL_NGINX_VERSION} - ports: - - "${OL_HTTP_PORT:-80}:80" - - "${OL_HTTPS_PORT:-443}:443" + #free up ports 80 and 443 for caddy + #ports: + # - "${OL_HTTP_PORT:-80}:80" + # - "${OL_HTTPS_PORT:-443}:443" + # bind HTTP only on localhost:8080 env_file: settings.env environment: NGINX_LOG_DIR: '/var/log/nginx/log' @@ -95,6 +97,16 @@ services: - reporting-db depends_on: [log, db-service-configuration] + db-on-host-init: + image: postgres:14 + volumes: + - ./db-on-host-init:/db-on-host-init + env_file: settings.env + command: ["/bin/bash", "-c", "/db-on-host-init/reporting-db-on-host.sh"] + depends_on: [log] + restart: "no" + + cron-config: build: context: ./cron @@ -152,6 +164,7 @@ services: - CONFIG_STORAGE_TOPIC=my_connect_configs - OFFSET_STORAGE_TOPIC=my_connect_offsets - HOST_NAME=0.0.0.0 + - KAFKA_HEAP_OPTS=-Xms512M -Xmx4G logging: driver: syslog options: @@ -196,12 +209,27 @@ services: - KAFKA_REST_PROXY_URL=http://kafka-rest:8082 - PROXY=true - MAX_BYTES=500000 + - CADDY_AUTOHTTPS=off # Disable automatic HTTPS and DNS logging: driver: syslog options: syslog-address: "udp://127.0.0.1:${SYSLOG_UDP_PORT}" depends_on: 
[kafka-rest] + akhq: + image: tchiotludo/akhq:latest + ports: + - 8089:8080 + environment: + - AKHQ_CONFIGURATION=/app/config/application.yml + volumes: + - ./config/services/akhq/akhq.yml:/app/config/application.yml + logging: + driver: syslog + options: + syslog-address: "udp://127.0.0.1:${SYSLOG_UDP_PORT}" + depends_on: [kafka] + superset: build: context: ./superset @@ -222,6 +250,19 @@ services: depends_on: [db, config-container, nginx] env_file: settings.env + caddy: + image: caddy:2 + restart: unless-stopped + ports: + - "80:80" # public HTTP + - "443:443" # public HTTPS + volumes: + - ./Caddyfile:/etc/caddy/Caddyfile:ro + - caddy_data:/data + - caddy_config:/config + networks: + - default + volumes: syslog: external: false @@ -231,3 +272,7 @@ volumes: external: false cron-periodic-volume: external: false + caddy_data: + external: false + caddy_config: + external: false diff --git a/reporting/settings-sample.env b/reporting/settings-sample.env index ffe4f6a3..89a54fe4 100644 --- a/reporting/settings-sample.env +++ b/reporting/settings-sample.env @@ -14,6 +14,9 @@ SRC_POSTGRES_PORT=5432 SRC_POSTGRES_DB=open_lmis SRC_POSTGRES_USER=postgres SRC_POSTGRES_PASSWORD=p@ssw0rd +## These were added to support setting up a DB outside docker +POSTGRES_HOST=localhost +POSTGRES_PORT=5433 ### Reporting Stack Database ### # The database URL for the Reporting Stack database @@ -71,6 +74,8 @@ SUPERSET_SECRET_KEY=changeme # Disabling SSL check in Superset service. 
By default sign-in via OAUTH requires OpenLMIS with HTTPS security # Note: Comment out this variable if you use it on production # OAUTHLIB_INSECURE_TRANSPORT=1 +#Add URL to your superset +SUPERSET_URL= # The domain name to use for Superset SUPERSET_DOMAIN_NAME=superset.local diff --git a/reporting/superset/Dockerfile b/reporting/superset/Dockerfile index 5bfb7aba..238d49c0 100644 --- a/reporting/superset/Dockerfile +++ b/reporting/superset/Dockerfile @@ -25,6 +25,7 @@ ENV GUNICORN_BIND=0.0.0.0:8088 \ SUPERSET_HOME=/var/lib/superset \ APP_DIR=${APP_DIR} \ WORKER_CLASS=gevent + ENV GUNICORN_CMD_ARGS="--workers ${GUNICORN_WORKERS} -k ${WORKER_CLASS} --timeout ${GUNICORN_TIMEOUT} --bind ${GUNICORN_BIND} --limit-request-line ${GUNICORN_LIMIT_REQUEST_LINE} --limit-request-field_size ${GUNICORN_LIMIT_REQUEST_FIELD_SIZE}" COPY requirements.txt requirements.txt @@ -46,12 +47,14 @@ RUN useradd -U -m superset && \ postgresql-client \ libssl-dev \ libffi-dev \ - python-dev \ + python3-dev \ libsasl2-dev \ - libldap2-dev && \ + libldap2-dev \ + libjpeg-dev \ + libpng-dev && \ apt-get clean && \ rm -r /var/lib/apt/lists/* && \ - pip install --upgrade setuptools pip && \ + pip install --upgrade setuptools "pip<24.1" && \ pip install --no-cache-dir \ gunicorn==20.1.0 \ gevent==22.10.2 \ @@ -60,15 +63,18 @@ RUN useradd -U -m superset && \ apache-superset[cors]==${SUPERSET_VERSION} \ flask-mail==0.9.1 \ flask-oauth==0.12 \ - flask_oauthlib==0.9.5 && \ - # MarkupSafe, fix missing 'soft_unicode' from 'markupsafe' - # fix cryptography, flask, werkzeug to last known working versions + flask_oauthlib==0.9.5 \ + Flask-Migrate==3.1.0 \ + alembic==1.8.1 \ + redis==4.5.5 && \ + # Fix missing MarkupSafe 'soft_unicode' and ensure Flask compatibility pip install -I --no-cache-dir \ MarkupSafe==2.0.1 \ cryptography==38.0.2 \ flask==1.1.4 \ werkzeug==1.0.1 \ - psycopg2-binary==2.9.1 + psycopg2-binary==2.9.1 \ + pillow # Ensure PIL/Pillow is installed for screenshots # Configure Filesystem RUN 
find ${APP_DIR} \! -type l -print0 | xargs -0 chown superset:superset @@ -93,10 +99,10 @@ ENV PATH=$NVM_DIR/versions/node/$NODE_VERSION/bin:$PATH RUN node -v RUN npm -v -# Installing dependecies via npm +# Installing dependencies via npm RUN npm install -g po2json -# Fetching dependecies and first build +# Fetching dependencies and first build RUN wget -P /tmp https://github.com/apache/superset/archive/${SUPERSET_VERSION}.zip \ && unzip /tmp/${SUPERSET_VERSION}.zip -d /tmp \ && rsync -a \ @@ -111,4 +117,8 @@ RUN cd $APP_DIR/superset-frontend && npm ci # Deploy application EXPOSE 8088 HEALTHCHECK CMD ["curl", "-f", "http://localhost:8088/health"] + +# Use Redis for caching instead of the metadata database +ENV CACHE_CONFIG='{"CACHE_TYPE": "RedisCache", "CACHE_REDIS_URL": "redis://redis:6379/0"}' + CMD ["gunicorn", "superset:app"] diff --git a/reporting/superset/Dockerfile.original b/reporting/superset/Dockerfile.original new file mode 100644 index 00000000..56148dbb --- /dev/null +++ b/reporting/superset/Dockerfile.original @@ -0,0 +1,114 @@ +FROM python:3.8.13 + +# Superset version +ARG SUPERSET_VERSION=1.5.2 + +# Superset-patchup (Ketchup) version +ARG SUPERSET_PATCHUP_VERSION=v0.5.1 + +# Default Superset files dir +ARG APP_DIR=/usr/local/lib/python3.8/site-packages/superset + +# Configure environment +ENV GUNICORN_BIND=0.0.0.0:8088 \ + GUNICORN_LIMIT_REQUEST_FIELD_SIZE=0 \ + GUNICORN_LIMIT_REQUEST_LINE=0 \ + GUNICORN_TIMEOUT=60 \ + GUNICORN_WORKERS=2 \ + LANG=C.UTF-8 \ + LC_ALL=C.UTF-8 \ + PYTHONPATH=/etc/superset:/home/superset:$PYTHONPATH \ + SUPERSET_REPO=apache/superset \ + SUPERSET_VERSION=${SUPERSET_VERSION} \ + SUPERSET_PATCHUP_VERSION=${SUPERSET_PATCHUP_VERSION} \ + SUPERSET_PATCHUP_REPO=https://github.com/OpenLMIS/superset-patchup.git@${SUPERSET_PATCHUP_VERSION} \ + SUPERSET_HOME=/var/lib/superset \ + APP_DIR=${APP_DIR} \ + WORKER_CLASS=gevent +ENV GUNICORN_CMD_ARGS="--workers ${GUNICORN_WORKERS} -k ${WORKER_CLASS} --timeout ${GUNICORN_TIMEOUT} --bind 
${GUNICORN_BIND} --limit-request-line ${GUNICORN_LIMIT_REQUEST_LINE} --limit-request-field_size ${GUNICORN_LIMIT_REQUEST_FIELD_SIZE}" + +COPY requirements.txt requirements.txt + +# Create superset user & install dependencies +RUN useradd -U -m superset && \ + mkdir /etc/superset && \ + mkdir ${SUPERSET_HOME} && \ + chown -R superset:superset /etc/superset && \ + chown -R superset:superset ${SUPERSET_HOME} && \ + apt-get update && \ + apt-get install -y \ + rsync \ + build-essential \ + curl \ + default-libmysqlclient-dev \ + freetds-bin \ + freetds-dev \ + postgresql-client \ + libssl-dev \ + libffi-dev \ + python-dev \ + libsasl2-dev \ + libldap2-dev && \ + apt-get clean && \ + rm -r /var/lib/apt/lists/* && \ + pip install --upgrade setuptools "pip<24.1" && \ + pip install --no-cache-dir \ + gunicorn==20.1.0 \ + gevent==22.10.2 \ + git+${SUPERSET_PATCHUP_REPO} \ + apache-superset==${SUPERSET_VERSION} \ + apache-superset[cors]==${SUPERSET_VERSION} \ + flask-mail==0.9.1 \ + flask-oauth==0.12 \ + flask_oauthlib==0.9.5 && \ + # MarkupSafe, fix missing 'soft_unicode' from 'markupsafe' + # fix cryptography, flask, werkzeug to last known working versions + pip install -I --no-cache-dir \ + MarkupSafe==2.0.1 \ + cryptography==38.0.2 \ + flask==1.1.4 \ + werkzeug==1.0.1 \ + psycopg2-binary==2.9.1 + +# Configure Filesystem +RUN find ${APP_DIR} \! -type l -print0 | xargs -0 chown superset:superset +COPY superset /usr/local/bin +VOLUME /etc/superset \ + /var/lib/superset +WORKDIR $APP_DIR + +# Node & npm +ENV NVM_DIR=/usr/local/nvm +ENV NODE_VERSION=v16.16.0 + +RUN curl --silent -o- https://raw.githubusercontent.com/creationix/nvm/v0.31.2/install.sh | bash +RUN . 
$NVM_DIR/nvm.sh \ + && nvm install $NODE_VERSION \ + && nvm alias default $NODE_VERSION \ + && nvm use default + +ENV NODE_PATH=$NVM_DIR/versions/node/$NODE_VERSION/lib/node_modules +ENV PATH=$NVM_DIR/versions/node/$NODE_VERSION/bin:$PATH + +RUN node -v +RUN npm -v + +# Installing dependecies via npm +RUN npm install -g po2json + +# Fetching dependecies and first build +RUN wget -P /tmp https://github.com/apache/superset/archive/${SUPERSET_VERSION}.zip \ + && unzip /tmp/${SUPERSET_VERSION}.zip -d /tmp \ + && rsync -a \ + --remove-source-files \ + --chown=superset:superset \ + /tmp/superset-${SUPERSET_VERSION}/superset-frontend $APP_DIR \ + && rm -r /tmp/${SUPERSET_VERSION}.zip + +USER superset:superset +RUN cd $APP_DIR/superset-frontend && npm ci + +# Deploy application +EXPOSE 8088 +HEALTHCHECK CMD ["curl", "-f", "http://localhost:8088/health"] +CMD ["gunicorn", "superset:app"] diff --git a/settings-sample.env b/settings-sample.env index d46fc85c..f5a174fe 100644 --- a/settings-sample.env +++ b/settings-sample.env @@ -2,14 +2,14 @@ # The base url of the OpenLMIS distribution. Will be used for communication between services. # In case of generated links pointing to the distribution, the PUBLIC_URL should be used instead. -BASE_URL=http://192.168.1.102 +BASE_URL=http://192.168.8.125 # The virtual host for the nginx server - nginx will make services available under this host. -VIRTUAL_HOST=192.168.1.102 +VIRTUAL_HOST=192.168.8.125 # The public url of the OpenLMIS distribution. Should be used in generated links pointing to the distribution. # If this variable is not set, the BASE_URL will be used for the generated links. 
-PUBLIC_URL=http://192.168.1.102 +PUBLIC_URL=http://192.168.8.125 ############################################################################################################ # Profile: use one of the desired deployment profiles below by uncommenting one (and only one) line below diff --git a/start-local.sh b/start-local.sh index 13678128..e8fc6d1d 100755 --- a/start-local.sh +++ b/start-local.sh @@ -49,7 +49,7 @@ setEnvByIp BOLD=$(tput bold) echo "Starting OpenLMIS Ref-Distro on ${BOLD}${HOST_ADDR}" docker-compose \ - -f docker-compose.yml \ + -f docker-compose.openlmis-prod.yml \ up \ --build \ --remove-orphans \